MOON
Server: Apache
System: Linux server1.studioinfinity.com.br 2.6.32-954.3.5.lve1.4.90.el6.x86_64 #1 SMP Tue Feb 21 12:26:30 UTC 2023 x86_64
User: artinside (517)
PHP: 7.4.33
Disabled functions: exec, passthru, shell_exec, system
File: //proc/self/root/opt/netdata/netdata-web-files/v3/3332.7c396478668791f90581.chunk.js
!function(){try{var e="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof globalThis?globalThis:"undefined"!=typeof self?self:{};e.SENTRY_RELEASE={id:"0b2b41ad76fac2d30743e79c4ef362ca62714b16"};var n=(new e.Error).stack;n&&(e._sentryDebugIds=e._sentryDebugIds||{},e._sentryDebugIds[n]="127b2c6b-5294-49e5-ab52-e842788af6b7",e._sentryDebugIdIdentifier="sentry-dbid-127b2c6b-5294-49e5-ab52-e842788af6b7")}catch(e){}}(),(self.webpackChunkcloud_frontend=self.webpackChunkcloud_frontend||[]).push([[3332],{64091(e,n,t){"use strict";t.d(n,{B9:()=>i});t(27495);const o=/^(([^<>()\[\]\\.,;:\s@"]+(\.[^<>()\[\]\\.,;:\s@"]+)*)|(".+"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$/,i=e=>!!e&&o.test(e)},7542(e,n,t){"use strict";t.d(n,{Bi:()=>a,T4:()=>o,bJ:()=>i});t(27495);const o=e=>{if(4===e.length&&e.startsWith("#")){const n=e.substr(1,1),t=e.substr(2,1),o=e.substr(3,1);e="#".concat(n).concat(n).concat(t).concat(t).concat(o).concat(o)}return(299*parseInt(e.substr(1,2),16)+587*parseInt(e.substr(3,2),16)+114*parseInt(e.substr(5,2),16))/1e3>=128},i=e=>o(e)?"#000000":"#ffffff",a=e=>/^#?([a-fA-F0-9]{3}|[a-fA-F0-9]{6})$/.test(e)},19707(e,n,t){"use strict";t.d(n,{I:()=>i});t(62953),t(3296),t(27208),t(48408);var o=t(39175);const i=()=>{var e,n,t,i,a,s;if(o.Ay)return null;let r,A;try{var c;const e=new URL(window.location.href);r=e.origin&&"null"!==e.origin?e.origin:null===(c=window.envSettings)||void 0===c?void 0:c.cloudUrl,A=e.searchParams}catch(p){return null}const l=null===(e=A)||void 0===e?void 0:e.get("labra_subscription_id");if(!l)return null;const d=null===(n=A)||void 0===n?void 0:n.get("cloud_provider"),u=null===(t=A)||void 0===t?void 0:t.get("cloud_product_id"),h=null===(i=A)||void 0===i?void 0:i.get("cloud_customer_id");if(h&&u&&d)return"".concat(r,"/api/v2/billing/labra/spaces?customer_id=").concat(h,"&marketplace=").concat(d,"&product_id=").concat(u,"&subscription_id=").concat(l);const m=null===(a=A)||void 0===a?void 0:a.get("aws_customer_id"),g=null===(s=A)||void 0===s?void 0:s.get("aws_product_id");return m&&g?"".concat(r,"/api/v2/billing/labra/spaces?customer_id=").concat(m,"&marketplace=aws&product_id=").concat(g,"&subscription_id=").concat(l):null}},32886(e,n,t){"use strict";t.d(n,{A:()=>d,e:()=>l});t(33110);var o=t(96540),i=t(52035),a=t(34843),s=t(21396);const r=(0,i.eU)(""),A=window.localNetdataRegistry.mg,c=window.envSettings.agentApiUrl,l=()=>(0,a.md)(r),d=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:A,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:c;const t=(0,a.Xr)(r);(0,o.useEffect)((()=>{if(!e)return;const o=s.enc.Hex.parse("fd90fa3e33a504c10a444f910444650772e77e81b00c7523643462f298fd14c0"),i=s.lib.WordArray.random(16),a=JSON.stringify({machine_guid:e,url:n}),r=s.AES.encrypt(a,o,{iv:i}).ciphertext,A=s.enc.Hex.stringify(i)+s.enc.Hex.stringify(r),c=A+s.HmacSHA256(s.enc.Hex.parse(A),o).toString();fetch("".concat("https://frankfurt.netdata.rocks/privacy","?data=").concat(encodeURIComponent(c))).then((e=>e.json())).then((e=>t(e.status))).catch((e=>console.error("Error:",e)))}),[e])}},85958(e,n,t){"use strict";t.r(n),t.d(n,{default:()=>Kn});var o=t(96540),i=t(64251),a=t(22332),s=t(87398),r=t(55164),A=t(46587),c=t(64973),l=t(58042),d=t(41936),u=(t(62953),t(89942)),h=t(63872);const m={ErrInvalidRedirectURI:"Invalid redirect URI",ErrUntrustedRedirectURI:"Untrusted redirect URI",ErrSpaceMemberAlreadyExists:"Space member already exists",ErrInvalidSpaceID:"Invalid space 
ID",ErrInvalidInvitationToken:"Invalid invitation token",ErrInvitationNotFound:"Invitation not found",ErrInvitationEmailMismatch:"Invitation email mismatch",ErrInvitationExpired:"Invitation expired",ErrUnauthenticated:"Unauthenticated",ErrInternalServerError:"Internal server error"},g=()=>{const[,e]=(0,h.A)();(0,o.useEffect)((()=>{const{error_msg_key:n,error_message:t}=(0,u.PP)();var o,i;t&&e({message:decodeURIComponent((o=n,i=t,m[o]||i||"An unexpected error occurred"))})}),[])};var p=t(30811),f=t(7960),y=t(42358),b=t(97940),E=t(34843),w=t(27965),B=t(24609),C=t(99728);var M=t(24703),T=t(62718),I=t(51510),v=t(6304),_=t(31933),Q=t(85720),D=t(98595),x=t(19186);const k=(0,I.default)(y.Drop).attrs({align:{top:"bottom",left:"left"},animation:!0,background:"dropdown",column:!0,margin:[2,0,0],overflow:{vertical:"auto"},padding:[2,0],round:1,width:64}).withConfig({displayName:"styled__Dropdown",componentId:"sc-1vwntsm-0"})([""]),R=(0,I.default)(y.H6).attrs({color:"text",padding:[2,4]}).withConfig({displayName:"styled__OtherRoomsHeader",componentId:"sc-1vwntsm-1"})([""]);var S=t(59721),P=t(72679),F=t(51571),Y=t(10444),U=t(56284),N=t(45087),j=t(74848);const z=()=>{const e=(0,Y.J)(),[n,t,,i]=(0,v.A)(),a=(0,o.useRef)(),s=(0,x.XA)(),{slug:r}=s,A=(0,B.ap)("name"),c=(0,P.A)({roomSlug:r}),l=(0,C.JT)("room:Read"),d=(0,B.dg)();return(0,j.jsxs)(o.Fragment,{children:[(0,j.jsxs)(y.Flex,{alignItems:"center",gap:1,"data-testid":"header-roomOptions",children:[(0,j.jsx)(N.A,{content:{title:"Room: ".concat(s.name||"Unknown"),description:"See all your rooms and switch room"},align:"bottom",disabled:n,children:(0,j.jsx)(Q.A,{ref:a,testid:"roomDropdownMenu-roomOptions",icon:"space_new",iconHeight:"14px",iconWidth:"14px",onClick:t,padding:[2],gap:2,round:1,width:"auto",disabled:!l||d,selected:n,children:!e&&s.name&&(0,j.jsxs)(y.Flex,{alignItems:"center",gap:3,children:[(0,j.jsx)(U.default,{maxFontSize:14,Component:y.Text,lineHeight:"unset",children:s.name}),(0,j.jsx)(y.Icon,{name:"chevron_down",size:"small",color:"text"})]})})}),(0,j.jsx)(S.A,{borderColor:"borderSecondary",hasHoverBackground:!0,children:(0,j.jsx)(N.A,{content:{title:"Room Settings",description:"Edit room settings, manage room nodes and users"},align:"bottom",children:(0,j.jsx)(F.A,{Component:y.IconButton,permission:"room:Leave","data-ga":"header::click-war-room-settings::global-view","data-testid":"header-manageRoom",onClick:c,icon:"gear"})})})]}),a.current&&n&&(0,j.jsx)(k,{target:a.current,onEsc:i,onClickOutside:i,children:(0,j.jsx)(D.A,{isOpen:!0,label:"ROOMS IN ".concat(A.toUpperCase()),headerTestId:"roomDropdownMenu-otherRooms",Header:R,children:(0,j.jsx)(y.Flex,{column:!0,height:{max:105},overflow:{vertical:"auto"},children:(0,j.jsx)(_.A,{})})})})]})};var H=t(24013),O=t(67462);t(89463);const 
L=e=>{let{description:n,title:t,testId:o,children:i}=e;return(0,j.jsxs)(y.Flex,{"data-testid":o,column:!0,gap:2,children:[(0,j.jsx)(y.H4,{color:"textDescription",children:t}),(0,j.jsxs)(y.Flex,{column:!0,"data-testid":o,gap:2,children:[(0,j.jsx)(y.Text,{color:"textDescription",children:n}),i]})]})},G=e=>{let{children:n,testId:t,icon:o="nodes_update"}=e;return(0,j.jsxs)(y.Flex,{"data-testid":t,padding:[6],round:1,width:"100%",background:"modalInfoBackground",gap:2,children:[(0,j.jsx)(y.Box,{as:y.Icon,width:10,height:10,name:o}),n]})},J=e=>{let{title:n,desciription:t}=e;return(0,j.jsxs)(y.Flex,{column:!0,gap:3,children:[(0,j.jsxs)(y.Flex,{alignItems:"center",gap:2,children:[(0,j.jsx)(y.Icon,{color:"main",name:"warning_triangle_hollow"}),(0,j.jsx)(y.H3,{children:n})]}),(0,j.jsx)(y.Box,{children:t})]})};var q=t(79748),K=t(41344),V=(t(98992),t(37550),t(89284)),X=t(49181),W=t(90930),Z=t(36504);const $=[{id:"name",accessor:"name",header:"Name",cell:e=>{let{getValue:n,row:t}=e;return(0,j.jsx)(q.A,{disabled:!t.original.isLive&&"stale"!==t.original.state,color:"text",hoverColor:"primary",visitedColor:"accent",Component:y.TextSmall,children:n()})},minSize:60},{id:"version",accessor:"version",header:"Version",cell:e=>{let{getValue:n}=e;return(0,j.jsx)(y.Pill,{color:"neutralPillColor",flavour:"neutral",children:n()})}},{id:"state",accessor:"state",header:"Status",cell:e=>{let{getValue:n}=e;return(0,j.jsx)(V.A,{state:n()})},sortingFn:(e,n)=>{return t=(0,X.GM)(e.original.state),o=(0,X.GM)(n.original.state),t===o?0:t>o?1:-1;var t,o},enableColumnFilter:!0,filterFn:(e,n,t)=>{const o=e.original;return t.length<1||t.some((e=>{let{value:n}=e;return"all"===n||n===(0,X.GM)(o.state)}))},meta:{tooltip:(0,j.jsx)(W.A,{}),filter:{component:"select",isMulti:!0,options:[{value:"Offline",label:"Offline"},{value:"Live",label:"Live"},{value:"Stale",label:"Stale"}]}}},{id:"updateSeverity",accessor:"updateSeverity",header:"Severity",cell:e=>{let{getValue:n,row:t}=e;const o=t.original;return(0,j.jsx)(Z.A,{name:o.name,os:o.os.id,container:o.hw.container,warningLevel:o.updateSeverity,labels:o.labels||{},version:o.version,text:n()})}}];const ee=(0,t(52035).eU)(!1),ne=function(){let{resetOnUnmount:e=!1}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const n=(0,E.Xr)(ee),t=(0,o.useCallback)((()=>n(!1)),[n]),[i,a]=(0,E.fp)(ee),s=(0,o.useCallback)((()=>a(!0)),[]),r=(0,o.useCallback)((()=>a(!1)),[]);return(0,o.useEffect)((()=>()=>{e&&t()}),[]),{isModalVisible:i,openModal:s,closeModal:r,resetState:t}},te=[{id:"updateSeverity",desc:!1}],oe=()=>{const{closeModal:e}=ne(),n=(0,o.useMemo)((()=>({goto:{handleAction:e=>{const{_install_type:n=null}=(null===e||void 0===e?void 0:e.labels)||{},{hw:{container:t},os:{id:o},version:i}=e,a=(0,O.fe)({container:t,os:o,_install_type:n,version:i});window.open(a,"_blank","noopener,noreferrer")},tooltipText:"Go to documentation"}})),[]),t=(0,o.useMemo)((()=>({download:{handleAction:(0,y.downloadCsvAction)("Outdated nodes"),isDisabled:!1,tooltipText:"Download as CSV",icon:"download",confirmation:!1,alwaysEnabled:!0}})),[]),i=(0,K.Zp)(),a=(0,H.Ux)();return{onClickRow:(0,o.useCallback)((n=>{let{data:t}=n;if(!t.isLive&&"stale"!==t.state)return;const o=a(t.id);i(o),e()}),[]),sortBy:te,rowActions:n,columns:$,disableClickRow:e=>{let{data:n}=e;return!n.isLive&&"stale"!==n.state},bulkActions:t}};var ie=t(37156);const 
ae={updateSeverity:!0,connectionToCloud:!1},se=e=>{let{data:n}=e;const{bulkActions:t,rowActions:o,onClickRow:i,disableClickRow:a,sortBy:s}=oe();return(0,j.jsx)(ie.A,{"data-testid":"nodesTable-layout",customSortBy:s,customNodes:n,showDefaultRowActions:!1,showDefaultBulkActions:!1,columnVisibility:ae,customRowActions:o,customBulkActions:t,enableSelection:!1,onClickRow:i,disableClickRow:a})},re=e=>{let{onClose:n}=e;const t=(0,H.Gn)(),o=(0,H.Gt)(t),i=t.length,a="Need update (".concat(t.length," ").concat((0,O.su)(t.length),")");return(0,j.jsx)(y.Modal,{backdropProps:{backdropBlur:!0},children:(0,j.jsxs)(y.ModalContent,{width:{min:200,base:270},height:200,children:[(0,j.jsxs)(y.ModalHeader,{children:[(0,j.jsx)(J,{title:"Nodes that need Attention",desciription:(0,j.jsxs)(y.Text,{children:["You have ",(0,j.jsx)(y.Text,{strong:!0,children:i})," ",(0,O.su)(i)," that should be upgraded to ensure experience using Netdata"]})}),(0,j.jsx)(y.ModalCloseButton,{onClose:n,testId:"close-button"})]}),(0,j.jsxs)(y.ModalBody,{overflow:"hidden",children:[(0,j.jsx)(G,{children:(0,j.jsx)(L,{title:a,children:(0,j.jsx)(y.Text,{children:(0,j.jsxs)(y.Flex,{column:!0,children:[(0,j.jsx)(y.Text,{children:"Please upgrade to ensure you get the latest security and bug fixes."})," ",(0,j.jsxs)(y.Box,{children:["To update your nodes to the latest version"," ",(0,j.jsxs)(q.A,{href:O.sH.default,rel:"noopener noreferrer",strong:!0,target:"_blank",children:["please read our documentation"," "]}),"and find direct links for each node depending on where it is running and/or how it was installed."]})]})})})}),(0,j.jsx)(se,{data:o})]})]})})};var Ae=t(67012),ce=t(23226),le=t(81641),de=t(86529);const ue=(0,I.default)(y.Flex).withConfig({displayName:"container__Container",componentId:"sc-qc13l0-0"})(["background:",";"],(e=>{let{theme:n,isPlaying:t}=e;const{name:o}=n;return("Dark"===o?(0,y.getRgbColor)(t?["green","green40"]:["neutral","grey70"]):(0,y.getRgbColor)(t?["green","green190"]:["neutral","grey185"]))({theme:n})}));var he=t(24285),me=t(64463);const ge=(0,I.default)(N.A).withConfig({displayName:"playPausePill__StyledTooltip",componentId:"sc-1umryvu-0"})(["pointer-events:",";"],(e=>{let{isDisabled:n}=e;return n?"none":"auto"})),pe=e=>{let{isPlaying:n,isForcePlaying:t,isDisabled:i}=e;const a=(0,Y.J)(),s=(0,he.xd)("defaultForcePlay"),{play:r,pause:A,forcePlay:c}=(0,me.A)(),l=(0,o.useMemo)((()=>((e,n)=>e?n?"forcePlay":"playSolid":"pauseSolid")(n,t)),[n,t]),d=(0,o.useMemo)((()=>n?{title:"Dashboard is Auto-Refreshing",description:"Click to pause automatic refreshes."}:{title:"Dashboard is Paused",description:"Click to enable automatic fetching of new data."}),[n]),u=(0,o.useMemo)((()=>n?A:s?c:r),[n,s,A,c,r]);return(0,j.jsx)(ge,{content:d,align:"bottom",isDisabled:i,children:(0,j.jsx)(y.Box,{as:y.Pill,icon:l,onClick:u,isPlaying:n,"data-testid":"playPause-button",flavour:n?"success":"neutral",_hover:{background:n?"accent":"neutralPillColor"},style:{minWidth:"77px"},children:a?null:n?"Playing":"Paused"})})};var fe=t(78804),ye=t(78069);const 
be=(0,I.default)(y.Flex).attrs({padding:[1],role:"button"}).withConfig({displayName:"styled__MenuButton",componentId:"sc-3h0zgs-0"})(["cursor:pointer;"]),Ee=(0,I.default)(y.Drop).attrs({align:{top:"bottom",left:"left"},animation:!0,backdrop:!0,column:!0,padding:[2,0],background:"dropdown",round:1,overflow:{vertical:"auto"},margin:[.5,0,0],width:40}).withConfig({displayName:"styled__Dropdown",componentId:"sc-3h0zgs-1"})([""]),we=(0,I.default)(N.A).withConfig({displayName:"styled__StyledTooltip",componentId:"sc-3h0zgs-2"})(["pointer-events:",";"],(e=>{let{isDisabled:n}=e;return n?"none":"auto"})),Be=e=>{let{target:n,isDisabled:t}=e;const[i,a,,s]=(0,v.A)(),{play:r,pause:A,forcePlay:c}=(0,me.A)(),{handleOpenProfileModal:l}=(0,ye.A)(),d={title:"Auto-refresh Mode",description:(0,j.jsxs)(j.Fragment,{children:["Select ",(0,j.jsx)("strong",{children:"Pause"})," (no automatic refreshes), ",(0,j.jsx)("strong",{children:"Play"})," (automatic refreshes) or ",(0,j.jsx)("strong",{children:"Force Play"})," (automatic refreshes even when the dashboard does not have your focus)."]})};return(0,j.jsxs)(o.Fragment,{children:[i?(0,j.jsx)(be,{onClick:a,width:"auto","data-testid":"playOptions-picker",children:(0,j.jsx)(y.Icon,{name:"chevron_down",color:"text",width:"12px",height:"12px"})}):(0,j.jsx)(we,{content:d,align:{bottom:"bottom",right:"right"},isDisabled:t,children:(0,j.jsx)(be,{onClick:a,width:"auto","data-testid":"playOptions-picker",children:(0,j.jsx)(y.Icon,{name:"chevron_down",color:"text",width:"12px",height:"12px"})})}),n.current&&i&&!t&&(0,j.jsxs)(Ee,{target:n.current,onEsc:s,onClickOutside:s,children:[(0,j.jsx)(Q.A,{icon:"playOutline",onClick:()=>{r(),s()},testid:"playOptions-play",children:"Play"}),(0,j.jsx)(Q.A,{icon:"pauseOutline",onClick:()=>{A(),s()},testid:"playOptions-pause",children:"Pause"}),(0,j.jsx)(Q.A,{icon:"forcePlayOutline",onClick:()=>{c(),s()},testid:"playOptions-forcePlay",children:"Force Play"}),(0,j.jsx)(fe.c,{}),(0,j.jsx)(Q.A,{icon:"gear",onClick:()=>{l("preferences"),s()},testid:"playOptions-settings",children:"Default refresh mode"})]})]})},Ce=(0,o.memo)(Be),Me=()=>{const e=(0,Y.J)(),n=(0,le.yD)(),t=(0,le.mQ)(),i=(0,le.A_)(),s=(0,o.useRef)(),r=(0,le.gC)(),A=(0,a.useAttributeValue)("autofetchOnWindowBlur"),[,c]=(0,ce.A)();return(0,Ae.A)((()=>{c(!!n||!!t)}),[n,t]),(0,j.jsx)(S.A,{hasBorder:!0,borderColor:"border",testId:"global-controls",children:(0,j.jsxs)(ue,{isPlaying:r,padding:e?[1]:[1,2],round:!0,height:"100%",alignItems:"center",gap:e?.2:1,isDisabled:n,ref:s,children:[(0,j.jsx)(pe,{isPlaying:r,isForcePlaying:A,isDisabled:n||t}),(0,j.jsx)(Ce,{target:s,isDisabled:n||t}),!i&&(0,j.jsx)(de.A,{isPlaying:r,tagging:"global-view"})]})})};var Te=t(5722),Ie=t(96184),ve=t(17662);const _e=()=>{const{openModal:e}=ne(),n=(0,H.Gn)({severity:"critical"}).length;return n?(0,j.jsx)(ve.A,{numberOfNodesWithCriticalSeverity:n,warningLevel:"critical",onClickUpdate:e}):(0,j.jsx)(j.Fragment,{})},Qe=e=>{let{onOpenModalClick:n}=e;const t=(0,H.Gn)({severity:"critical"}).length,o=(0,H.Gn)().length;return o?(0,j.jsx)(S.A,{borderColor:"borderSecondary",cursor:"pointer",testId:"update-netdata-version-menu-item",hasBorder:!1,padding:[2,0],children:(0,j.jsx)(y.Pill,{"data-testid":"update-netdata-version-badge",flavour:t?"error":"warning",icon:"warning_triangle_hollow",hollow:!0,onClick:n,children:o})}):null};var De=t(3692),xe=t(38648),ke=t(52950);const 
Re={offline:{icon:"switch_off",color:"attention",text:"Offline"},online:{icon:"checkmark_s",color:"primary",text:"Online"},indirect:{icon:"checkmark_s",color:"primary",text:"Indirect"}},Se=()=>{const[e,n]=(0,o.useState)((()=>navigator&&!navigator.onLine||!1)),t=(0,B.dg)(),i=(0,x.GJ)(),a=(0,A.uW)("isAnonymous"),[{canBeClaimed:s,cloudStatus:r}]=(0,d.RJ)();if((0,o.useEffect)((()=>{const e=()=>n(!0),t=()=>n(!1);return window.addEventListener("offline",e),window.addEventListener("online",t),()=>{window.removeEventListener("offline",e),window.removeEventListener("online",t)}}),[]),a)return(0,j.jsx)(xe.default,{});if(e)return(0,j.jsxs)(y.Flex,{alignItems:"center",gap:1,title:"It seems you are offline! Check your network connection...",children:[(0,j.jsx)(y.Icon,{name:Re.offline.icon,color:"error",width:"14px",height:"14px"}),(0,j.jsx)(y.TextSmall,{color:"error",children:Re.offline.text})]});if(!t)return i&&s?(0,j.jsxs)(y.Flex,{alignItems:"center",gap:1,children:[(0,j.jsx)(De.A,{}),(0,j.jsx)(ke.A,{})]}):(0,j.jsx)(ke.A,{});if(Re[r]){const{icon:e,color:n,text:t}=Re[r];return(0,j.jsxs)(y.Flex,{alignItems:"center",gap:1,title:"Netdata Cloud is ".concat(t),children:[(0,j.jsx)(y.Icon,{name:e,color:n,width:"14px",height:"14px"}),(0,j.jsx)(y.TextSmall,{color:n,children:t})]})}return s?(0,j.jsx)(De.A,{small:!0}):null};t(54520);var Pe=t(25962);const Fe=()=>{const[{nodesLoaded:e,claimedNodeIds:n}]=(0,Pe.A)(),t=(0,H.Gt)(n),i=t.filter((e=>{let{bugs:n}=e;return!!(n||[]).length})),a=(0,o.useMemo)((()=>i.length),[i]),s=(0,o.useMemo)((()=>e&&a>0&&a==t.length),[e,t,a]);return{numberOfAffectedNodes:a,affectedNodes:i,allNodesAffected:s}};t(3949);var Ye=t(64467),Ue=t(56171);function Ne(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function je(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Ne(Object(t),!0).forEach((function(n){(0,Ye.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Ne(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const ze=(0,o.memo)((()=>(0,j.jsxs)(y.Text,{color:"main",children:["might be affected by a"," ",(0,j.jsx)(q.A,{href:"https://access.redhat.com/errata/RHSA-2019:0512",target:"_blank",rel:"noopener noreferrer",as:"a",cursor:"pointer",textDecoration:"underline",color:"main",hoverColor:"textFocus",children:"kernel bug"}),". 
Please update to avoid unexpected behaviour."]}))),He=(Oe=(0,o.memo)((e=>{let{numberOfAffectedNodes:n,allNodesAffected:t}=e;const o=(0,B.bq)(),i="node".concat(n>1?"s":"");return(0,j.jsxs)(y.Text,{color:"main",children:[(0,j.jsxs)(q.A,{as:K.N_,to:"/spaces/".concat(o,"/settings/nodes"),textDecoration:"underline",color:"main",hoverColor:"textFocus",children:[t?"Your":n," ",i]})," ",(0,j.jsx)(ze,{})]})})),e=>(0,j.jsxs)(y.Flex,{"data-testid":"buggy-kernel-message",justifyContent:"center",alignItems:"center",width:"100%",gap:2,children:[(0,j.jsx)(y.Icon,{color:"error",name:"warning_triangle_hollow"})," ",(0,j.jsx)(Oe,je({},e))]}));var Oe;const Le=e=>(0,j.jsx)(Ue.A,{background:"errorBackground",children:(0,j.jsx)(He,je({},e))}),Ge=(0,o.memo)(Le),Je=()=>{const{numberOfAffectedNodes:e,allNodesAffected:n}=Fe();return e>0?(0,j.jsx)(Ge,{numberOfAffectedNodes:e,allNodesAffected:n}):null};var qe=t(80045);t(81454);function Ke(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Ve(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Ke(Object(t),!0).forEach((function(n){(0,Ye.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Ke(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Xe=(0,I.default)(y.Text).attrs((e=>Ve({color:"main"},e))).withConfig({displayName:"webinarPromotion__Txt",componentId:"sc-f6zwf-0"})([""]),We=(0,I.default)(q.A).attrs((e=>Ve({hoverColor:"main"},e))).withConfig({displayName:"webinarPromotion__StyledAnchor",componentId:"sc-f6zwf-1"})(["text-decoration:underline;"]),Ze=e=>{let{isPaid:n,isTrial:t}=e;return!n||t?(0,j.jsxs)(y.Flex,{with:"100%",alignItems:"center",gap:2,padding:[1,0],children:[(0,j.jsx)(y.Icon,{name:"insights",color:"main"}),(0,j.jsxs)(Xe,{children:["Live Webinar: AI-Powered Monitoring with Netdata Cloud MCP Server |"," ",(0,j.jsx)(We,{Component:Xe,href:"https://www.netdata.cloud/webinars/netdata-cloud-mcp-server/",target:"_blank",rel:"noopener noreferrer",children:"Register Free"})]})]}):null};var $e=t(40545);const en=()=>{const{id:e}=(0,$e.A)(),n=(0,B.vt)();return(0,o.useCallback)((t=>"dismissed-static-banner-".concat(t,"-").concat(e,"-").concat(n)),[e,n])};var nn=t(53949),tn=t(76375),on=t(40573),an=t(4204);const sn=["id","Component","componentProps"];function rn(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function An(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?rn(Object(t),!0).forEach((function(n){(0,Ye.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):rn(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const cn={demo:"demo",freePlan:"freePlan",paidPlan:"paidPlan",trial:"trial",onprem:"onprem",agent:"agent"},ln=e=>{let{id:n,Component:t,componentProps:i={}}=e,a=(0,qe.A)(e,sn);const s=en(),{dismissed:r,onClose:A}=(0,nn.A)({getLocalStorageKey:()=>s(n),logKey:"StaticBannerDismiss-".concat(n)}),c=(0,o.useMemo)((()=>a.isDismissable?{onClose:A}:{}),[a.isDismissable,A]);return 
r?null:(0,j.jsx)(Ue.A,An(An(An({},c),a),{},{children:(0,j.jsx)(t,An({},i))}),n)},dn=()=>{const{loaded:e,isPaid:n,isTrial:t}=(0,tn.A)(),{isDemo:i}=(0,on.A)(),a=(0,B.dg)(),s=(0,o.useMemo)((()=>e?(e=>{let{isDemo:n,isPaid:t,isTrial:o,onVirtualSpace:i}=e;return[{id:"webinar-promotion",Component:Ze,componentProps:{},background:"secondaryHighlightAI",isDismissable:!0,disabled:!1,visibility:[cn.paidPlan,cn.freePlan,cn.trial,cn.agent]}].filter((e=>{if(e.disabled)return!1;const{visibility:a=[]}=e;return!(n&&!a.includes(cn.demo))&&!(t&&!a.includes(cn.paidPlan))&&!(t&&!a.includes(cn.freePlan))&&!(o&&!a.includes(cn.trial))&&!(i&&!a.includes(cn.agent))&&!(an.ei&&!a.includes(cn.onprem))}))})({isDemo:i,isPaid:n,isTrial:t,onVirtualSpace:a}):[]),[e,n]);return(0,j.jsx)(j.Fragment,{children:s.map((e=>(0,j.jsx)(ln,An({},e),e.id)))})},un=(0,f.A)((()=>Promise.all([t.e(9296),t.e(7414)]).then(t.bind(t,7414))),"NodesIndicator"),hn=I.default.div.withConfig({displayName:"header__HeaderContainer",componentId:"sc-1jxz29-0"})(["display:",";"],(e=>{let{showHeader:n}=e;return n?"contents":"none"})),mn=()=>{const e=(0,Y.J)(),{isModalVisible:n,closeModal:t,openModal:i}=ne(),a=(0,Te.A)();return(0,j.jsxs)(hn,{showHeader:a,children:[(0,j.jsx)(_e,{}),(0,j.jsx)(Je,{}),(0,j.jsx)(dn,{}),(0,j.jsxs)(y.Flex,{as:"header","data-testid":"header",padding:e?[1]:[2],justifyContent:"between",alignItems:"center",height:12,position:"sticky",background:"topBarBg",zIndex:5,overflow:{horizontal:"auto",vertical:"hidden"},children:[n&&(0,j.jsx)(re,{onClose:t}),(0,j.jsx)(z,{}),(0,j.jsxs)(y.Flex,{alignItems:"center","data-testid":"header-features",gap:e?1:3,children:[(0,j.jsx)(Me,{}),(0,j.jsx)(o.Suspense,{fallback:(0,j.jsx)(Ie.q,{disabled:!0}),children:(0,j.jsx)(un,{})}),(0,j.jsx)(Se,{margin:[0,1,0,0],padding:[2,4]}),(0,j.jsx)(Qe,{onOpenModalClick:i})]})]})]})};var gn=t(99722);const pn=(0,I.default)(y.Flex).withConfig({displayName:"layout__UserControlContainer",componentId:"sc-18en99b-0"})(["bottom:0;left:0;"]),fn=e=>{let{children:n}=e;return(0,j.jsxs)(y.Flex,{width:"100vw",height:"100vh",column:!0,justifyContent:"center",alignItems:"center",background:"mainBackground",position:"relative",children:[n,(0,j.jsx)(pn,{position:"absolute",padding:[4],children:(0,j.jsx)(gn.A,{})})]})},yn=(0,I.keyframes)(["from{opacity:0.4;}to{opacity:1;}"]),bn=(0,I.default)(y.Icon).withConfig({displayName:"loading__StyledIcon",componentId:"sc-11p1wp-0"})(["width:208px;height:177px;animation:"," 1.6s ease-in infinite;"],yn),En=()=>(0,j.jsxs)(o.Fragment,{children:[(0,j.jsx)(bn,{name:"netdata",color:"primary",title:"Loading","data-testid":"spaceCreationLoading-logo"}),(0,j.jsx)(y.H3,{color:"text",margin:[1,0,0],children:"We are attempting to create your space but the system is currently busy."}),(0,j.jsx)(y.Text,{color:"text",margin:[4.5,0,0],children:"Thank you for your patience!"})]});var wn=t(91370);const Bn=()=>{const e=(0,A.NJ)(),n=(0,A.uW)("email"),[t,i]=(0,o.useState)(!1),a=(0,o.useCallback)((()=>i(!1)),[]),s=(0,wn.A)({onSuccess:a,onError:a,isDefault:!0}),r=(0,o.useCallback)((()=>{i(!0),s({userId:e,email:n})}),[e,n]);return(0,j.jsxs)(y.Flex,{column:!0,gap:6,alignItems:"center",children:[(0,j.jsxs)(y.Flex,{column:!0,gap:2,alignItems:"center",children:[(0,j.jsx)(y.H3,{color:"text",children:"There was a problem with automatically creating your space"}),(0,j.jsx)(y.Text,{color:"text",children:"Please try again below"})]}),(0,j.jsx)(y.Button,{label:"Continue",isLoading:t,onClick:r})]})},Cn=()=>{const[e,n]=(0,o.useState)(!0);return(0,o.useEffect)((()=>{const 
e=setTimeout((()=>n(!1)),1e4);return()=>clearTimeout(e)}),[]),(0,j.jsx)(fn,{children:e?(0,j.jsx)(En,{}):(0,j.jsx)(Bn,{})})};var Mn=t(52367),Tn=t(39175),In=t(32742);const vn=()=>(0,j.jsx)(y.Flex,{background:"sideBarMini","data-testid":"workspaceBar",padding:[3,2],column:!0,alignItems:"center",gap:6,height:"100vh",justifyContent:"between",children:(0,j.jsx)(In.A,{"data-testid":"workspaceBar-netdataLogo"})}),_n=(0,f.A)((()=>t.e(2201).then(t.bind(t,52201)).then((e=>({default:e.TrialWarning})))),"TrialWarning"),Qn=(0,f.A)((()=>t.e(5030).then(t.bind(t,75030))),"OnPremWarning"),Dn=(0,f.A)((()=>t.e(2558).then(t.bind(t,12558))),"AgentNotSecure"),xn=(0,f.A)((()=>t.e(124).then(t.bind(t,70124))),"SpaceSidebar"),kn=(0,f.A)((()=>t.e(891).then(t.bind(t,40891))),"Modals"),Rn=(0,f.A)((()=>t.e(2436).then(t.bind(t,32436))),"AcceptTermsDialog"),Sn=(0,f.A)((()=>t.e(6497).then(t.bind(t,96497))),"DemoForm"),Pn=(0,f.A)((()=>Promise.all([t.e(564),t.e(5137),t.e(2150),t.e(5997),t.e(9296),t.e(4416)]).then(t.bind(t,24416))),"SpacePages"),Fn=(0,f.A)((()=>t.e(7281).then(t.bind(t,7281))),"DndContext"),Yn=(0,f.A)((()=>t.e(2143).then(t.bind(t,62143))),"DynamicConfigurationModal"),Un=(0,f.A)((()=>t.e(7931).then(t.bind(t,27931))),"SpaceWarnings"),Nn=(0,o.memo)((e=>{let{isUserLoaded:n,error:t,isAnonymous:i,areSpacesLoaded:a}=e;const{isOpen:s,modal:r}=(0,Mn.A)();return(0,j.jsx)(Fn,{children:(0,j.jsxs)(y.Flex,{overflow:"hidden",height:"100vh",width:"100vw",children:[(0,j.jsx)(o.Suspense,{fallback:(0,j.jsx)(vn,{}),children:(0,j.jsx)(xn,{})}),(0,j.jsxs)(y.Flex,{column:!0,position:"relative",overflow:"hidden",flex:!0,children:[(0,j.jsx)(o.Suspense,{fallback:"",children:(0,j.jsx)(Un,{})}),(0,j.jsx)(o.Suspense,{fallback:"",children:(0,j.jsx)(_n,{})}),(0,j.jsx)(mn,{}),(0,j.jsx)(y.Flex,{position:"relative",column:!0,as:"main",background:"mainBackground",flex:!0,basis:"100%",height:"100%",overflow:"hidden",children:n&&(t||i||a)?(0,j.jsx)(o.Suspense,{fallback:(0,j.jsx)(T.Qk,{}),children:(0,j.jsx)(Pn,{})}):(0,j.jsx)(T.Qk,{})}),(0,j.jsxs)(o.Suspense,{fallback:"",children:[(0,j.jsx)(Qn,{}),(0,j.jsx)(Dn,{})]})]}),(0,j.jsxs)(o.Suspense,{fallback:"",children:[(0,j.jsx)(kn,{}),(0,j.jsx)(Rn,{}),(0,j.jsx)(Sn,{})]}),s&&r,(0,j.jsx)(Yn,{})]})})})),jn=e=>{let{isUserLoaded:n}=e;!function(){let{isUserLoaded:e}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const n=(0,B.Sj)(),t=(0,E.Xr)(C.jD),o=e=>{let{results:o}=e;n(o),t(o)};(0,b.A)((()=>({key:"spaces",cache:!1,autorun:!!e,fetch:w.EL,association:{getIds:()=>(0,B.nC)("ids"),getError:()=>(0,B.nC)("error"),getLoaded:()=>(0,B.nC)("loaded"),getUpdatedAt:()=>(0,B.nC)("updatedAt")},getResource:e=>(0,B.U2)({id:e}),onReceive:o,pollingOptions:{pollingInterval:412e3}})),[!!e])}({isUserLoaded:n}),(0,M.A)();const t=(0,B.Pk)(),o=(0,B.UV)("loaded"),i=(0,B.UV)("error"),a=(0,A.uW)("isAnonymous"),s=t.length;if(n&&i&&!s)throw i;return!n||!o||Tn.Ay||a||s?(0,j.jsx)(Nn,{isUserLoaded:n,error:i,isAnonymous:a,areSpacesLoaded:o}):(0,j.jsx)(Cn,{})};var zn=t(40961),Hn=t(97394),On=t(56523);const Ln=I.default.div.withConfig({displayName:"onboardingSpotlight__Overlay",componentId:"sc-1bjs1r3-0"})(["position:absolute;background:rgba(0,0,0,0.5);pointer-events:auto;"]),Gn=I.default.div.withConfig({displayName:"onboardingSpotlight__Arrow",componentId:"sc-1bjs1r3-1"})(["position:absolute;width:12px;height:12px;background:",";border:1px solid ",";transform:rotate(45deg);",""],(0,y.getColor)("mainBackground"),(0,y.getColor)("border"),(e=>{let{position:n}=e;switch(n){case"top":return"\n          bottom: -7px;\n          left: 
50%;\n          margin-left: -6px;\n          border-top: none;\n          border-left: none;\n        ";case"bottom":return"\n          top: -7px;\n          left: 50%;\n          margin-left: -6px;\n          border-bottom: none;\n          border-right: none;\n        ";case"left":return"\n          right: -7px;\n          top: 50%;\n          margin-top: -6px;\n          border-bottom: none;\n          border-left: none;\n        ";case"right":return"\n          left: -7px;\n          top: 50%;\n          margin-top: -6px;\n          border-top: none;\n          border-right: none;\n        ";default:return""}})),Jn=()=>{const[e,n]=(0,E.fp)(Hn.TZ),[t,i]=(0,o.useState)(null),[a,s]=(0,o.useState)(0),{markTaskCompleted:r}=(0,On.A)(),{open:A}=(0,Mn.A)(),c=(0,o.useRef)(null),l=(0,o.useRef)(null),d=(0,o.useMemo)((()=>e?e.steps||[e]:[]),[e]),u=d[a],h=d.length>1,m=a===d.length-1;(0,o.useEffect)((()=>{(null===e||void 0===e?void 0:e.taskId)!==l.current&&(s(0),l.current=null===e||void 0===e?void 0:e.taskId)}),[null===e||void 0===e?void 0:e.taskId]),(0,o.useEffect)((()=>{e&&!c.current&&(c.current=window.location.pathname),e||(c.current=null)}),[e]),(0,o.useEffect)((()=>{e&&c.current&&window.location.pathname!==c.current&&(n(null),c.current=null)}),[e,n]);const g=(0,o.useCallback)((()=>{m?(e&&r(e.milestoneId,e.taskId),n(null)):s((e=>e+1))}),[m,e,r,n]),p=null!==(f=u)&&void 0!==f&&f.selector?f.selector:null!==f&&void 0!==f&&f.targetId?'[data-onboarding-id="'.concat(f.targetId,'"]'):null;var f;const b=(0,o.useRef)(null),w=(0,o.useRef)(g);w.current=g,(0,o.useEffect)((()=>{if(!p)return i(null),void(b.current=null);if(b.current===p)return;const e=()=>{const e=document.querySelector(p);return!!e&&(e.scrollIntoView({behavior:"smooth",block:"center"}),i(e.getBoundingClientRect()),b.current=p,!0)};if(i(null),!e()){const n=setInterval((()=>{e()&&clearInterval(n)}),500),t=setTimeout((()=>{clearInterval(n),e()||w.current()}),3e3);return()=>{clearInterval(n),clearTimeout(t)}}const n=()=>{const e=document.querySelector(p);e&&i(e.getBoundingClientRect())};return window.addEventListener("resize",n),window.addEventListener("scroll",n,!0),()=>{window.removeEventListener("resize",n),window.removeEventListener("scroll",n,!0)}}),[p]);const B=(0,o.useCallback)((()=>{e&&r(e.milestoneId,e.taskId),n(null)}),[e,r,n]),C=(0,o.useCallback)((()=>{const e=a+1;if(e>=d.length)return;b.current=null,s(e);const n=d[e];null!==n&&void 0!==n&&n.openAiPanel&&A()}),[a,d,A]),M=(0,o.useCallback)((()=>{a<=0||(b.current=null,s(a-1))}),[a]),T=(0,o.useCallback)((e=>{e.target===e.currentTarget&&B()}),[B]);if(!e||!u||!t)return null;const I=b.current===p,v=((e,n)=>{switch(n){case"top":return{top:e.top-8,left:e.left+e.width/2,transform:"translate(-50%, -100%)"};case"bottom":return{top:e.bottom+8,left:e.left+e.width/2,transform:"translateX(-50%)"};case"left":return{top:e.top+e.height/2,left:e.left-8,transform:"translate(-100%, 
-50%)"};case"right":return{top:e.top+e.height/2,left:e.right+8,transform:"translateY(-50%)"};default:return{top:e.bottom+12,left:e.left+e.width/2,transform:"translateX(-50%)"}}})(t,u.position||"bottom"),_={top:t.top-4,left:t.left-4,width:t.width+8,height:t.height+8};return(0,zn.createPortal)((0,j.jsxs)(y.Box,{"data-name":"spotlight",position:"fixed",sx:{inset:0,zIndex:9999,pointerEvents:"none"},children:[(0,j.jsx)(Ln,{style:{top:0,left:0,right:0,height:_.top},onClick:T}),(0,j.jsx)(Ln,{style:{top:_.top+_.height,left:0,right:0,bottom:0},onClick:T}),(0,j.jsx)(Ln,{style:{top:_.top,left:0,width:_.left,height:_.height},onClick:T}),(0,j.jsx)(Ln,{style:{top:_.top,left:_.left+_.width,right:0,height:_.height},onClick:T}),I&&(0,j.jsxs)(y.Flex,{column:!0,background:"mainBackground",border:!0,round:2,padding:[4],width:{max:"400px",min:"200px"},zIndex:1e4,overflow:"visible",position:"absolute",top:v.top,left:v.left,margin:[0],sx:{boxShadow:"0 4px 12px rgba(0, 0, 0, 0.15)",pointerEvents:"auto",transform:v.transform},children:[(0,j.jsx)(Gn,{position:u.position||"bottom"}),h&&(0,j.jsxs)(y.TextMicro,{color:"textLite",whiteSpace:"nowrap",children:["Step ",a+1," of ",d.length]}),(0,j.jsxs)(y.Flex,{justifyContent:"between",alignItems:"center",gap:2,children:[(0,j.jsx)(y.Text,{strong:!0,children:u.title}),(0,j.jsx)(y.IconButton,{icon:"x",neutral:!0,onClick:B})]}),(0,j.jsx)(y.TextSmall,{color:"textLite",children:u.description}),(0,j.jsxs)(y.Flex,{justifyContent:"end",gap:2,padding:[2,0,0,0],children:[h&&!m&&(0,j.jsx)(y.Button,{label:"Skip",flavour:"hollow",small:!0,onClick:B}),h&&a>0&&(0,j.jsx)(y.Button,{label:"Back",flavour:"hollow",small:!0,onClick:M}),m?(0,j.jsx)(y.Button,{label:"Done",small:!0,onClick:B}):(0,j.jsx)(y.Button,{label:"Next",small:!0,onClick:C})]})]})]}),document.body)},qn=(0,a.withChartProvider)((()=>{const e=(0,A.uW)("isLoaded"),n=(0,A.uW)("email"),t=(0,A.NJ)(),a=(0,A.uW)("isAnonymous"),s=(0,p.c)(),r=(0,l.OS)();return(0,o.useEffect)((()=>{a||r()}),[a]),(0,o.useEffect)((()=>{if(t&&n)try{i.gV({id:t,email:n})}catch(e){console.warn("Sentry: unable to set user")}}),[n,t]),(0,l.Ay)(),(0,c.Ay)(),(0,d.Ay)(),g(),(0,j.jsxs)(j.Fragment,{children:[(0,j.jsx)(jn,{isUserLoaded:e},s),(0,j.jsx)(Jn,{})]})})),Kn=(0,r.Xc)((()=>{const e=(0,s.eg)().getRoot();return(0,j.jsx)(qn,{chart:e})}))},56171(e,n,t){"use strict";t.d(n,{A:()=>h});t(98992),t(54520),t(3949);var o=t(80045),i=t(64467),a=t(51510),s=t(42358),r=t(45087),A=t(74848);const c=["children","background","testId","onClose","tooltipProps"];function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function d(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?l(Object(t),!0).forEach((function(n){(0,i.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):l(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const u=(0,a.default)(s.Box).attrs((e=>d({position:"absolute",top:"50%",right:"8px",cursor:"pointer",color:"text",name:"x"},e))).withConfig({displayName:"banner__StyledBox",componentId:"sc-1j1w4tz-0"})(["transform:translateY(-50%);"]),h=e=>{let{children:n,background:t,testId:i,onClose:a,tooltipProps:l}=e,h=(0,o.A)(e,c);const 
m=i||"header-banner";return(0,A.jsxs)(s.Flex,d(d({background:t,"data-testid":m,padding:[2,10,2,2],position:"sticky",zIndex:5},h),{},{children:[n,(0,A.jsx)(r.A,d(d({align:"bottom",content:"Dismiss message",isBasic:!0,plain:!0,stretch:"align"},l),{},{children:a&&(0,A.jsx)(u,{"data-testid":"".concat(m,"-close-button"),as:s.Icon,onClick:e=>{a&&(e.stopPropagation(),a())}})}))]}))}},81931(e,n,t){"use strict";t.d(n,{j:()=>s});t(62953),t(48408);var o=t(91130),i=t(41395);const a=e=>(window.localNetdataRegistry.mg=e.agent.mg,window.localNetdataRegistry.hostname=(0,i.Yv)(e.agent.nm||"agent"),{cloudStatus:e.cloud.status,canBeClaimed:e.can_be_claimed,keyFilename:e.key_filename,claimId:e.cloud.claim_id,mg:e.agent.mg,nd:e.agent.nd,success:e.success,message:e.message,cmd:e.cmd,help:e.help}),s=function(){let{key:e,token:n,rooms:t,url:i}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},s=new URLSearchParams({key:e,rooms:t,token:n,url:i}).toString();return s=e&&t&&n&&i&&s?"?".concat(s):"",o.A.get("/api/v3/claim".concat(s),{baseURL:window.envSettings.agentApiUrl,transform:a})}},3692(e,n,t){"use strict";t.d(n,{A:()=>ne});var o=t(64467),i=(t(98992),t(54520),t(3949),t(62953),t(42358)),a=t(98591),s=t(45087),r=t(41936),A=t(96540),c=t(51510),l=t(30005),d=t(55093),u=t(80045),h=(t(81454),t(39175)),m=t(6304),g=t(30582),p=t(24609),f=t(24655),y=t(61337),b=t(51571),E=t(74848);const w=["selectedSpace","setSelectedSpace","column","showFullname"];function B(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function C(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?B(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):B(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const M=c.default.div.withConfig({displayName:"spaces__Separator",componentId:"sc-19p9rsp-0"})(["height:1px;width:",";background:",";"],(0,i.getSizeBy)(3),(0,i.getColor)("border")),T=e=>{let{selectedSpace:n,setSelectedSpace:t,column:o,showFullname:a}=e,r=(0,u.A)(e,w);const c=(0,p.Pk)(),l=(0,A.useMemo)((()=>c.filter((e=>!(0,h.ES)(e)))),[c]),[d,,B,T]=(0,m.A)();return(0,E.jsxs)(i.Flex,C(C({"data-testid":"workspaceBar",padding:[3,2],column:o,alignItems:"center",justifyContent:"center"},r),{},{children:[(0,E.jsxs)(i.Flex,{column:!0,"data-testid":"workspaceBar-spaces-list",gap:4,alignItems:"center",overflow:"hidden",children:[(0,E.jsx)(i.Flex,{column:o,"data-testid":"claim-modal-workspaceBar-spacesList",gap:o?4:2,overflow:{vertical:"auto"},flexWrap:!o,children:l.map((e=>(0,E.jsx)(f.A,{testIdPrefix:"workspaceBar-space",spaceId:e,onClick:t,active:e===(null===n||void 0===n?void 0:n.id),backgroundColor:"selected",showFullname:a},e)))}),(0,E.jsxs)(y.A,{permission:"user:CreateSpace",children:[(0,E.jsx)(M,{"data-testid":"workspaceBar-separator"}),(0,E.jsx)(s.A,{content:"Create a new Space",align:"right",children:(0,E.jsx)(b.A,{permission:"user:CreateSpace",ifForbidden:"hide",icon:"plus",onClick:B,"data-testid":"workspaceBar-addSpace-button"})})]})]}),d&&(0,E.jsx)(g.A,{onClose:T,onDone:t})]}))};var I=t(85005),v=t(98595),_=t(99728),Q=t(84060),D=t(49916),x=t(12470);const k=e=>{let{selectedSpace:n,selectedRoom:t,setSelectedRoom:o}=e;const a=n.id;(0,Q.A)(a);const 
r=(0,_.JT)("room:Create",a),c=(0,_.JT)("room:Read",a),[l,,d,u]=(0,m.A)(!1),h=(0,A.useCallback)((e=>{e.stopPropagation(),r&&d()}),[r]),g=(0,D.CB)(a),p=(0,A.useMemo)((()=>g.filter((e=>{let{isMember:n,isAgent:t}=e;return n&&!t}))),[g]);return(0,A.useEffect)((()=>{null!==p&&void 0!==p&&p.length&&(t||o(p[0]))}),[p,t]),(0,E.jsxs)(E.Fragment,{children:[(0,E.jsx)(v.A,{"data-testid":"workspaceRooms-menuList",disabled:!c,headerTestId:"workspaceRooms-warRooms",isOpen:!0,label:(0,E.jsxs)(i.Flex,{padding:[1,0],margin:[0,0,1],flex:!0,justifyContent:"between",alignItems:"center",height:"24px",children:[(0,E.jsxs)(i.Flex,{alignItems:"center",gap:2,children:[(0,E.jsx)(i.Icon,{name:"space_new",color:"textNoFocus",width:"14px",height:"14px"}),(0,E.jsx)(i.TextSmall,{color:"textNoFocus",children:"Select a room"})]}),(0,E.jsx)(i.Flex,{alignItems:"center",gap:2,children:(0,E.jsx)(s.A,{content:"Create a new room",align:"right",children:(0,E.jsx)(b.A,{permission:"room:Create",icon:"plus",onClick:h,"data-testid":"workspaceRooms-addWarRoom-button",spaceId:a})})})]}),children:(0,E.jsx)(i.Flex,{column:!0,"data-testid":"workspaceRooms-warRoomsList",padding:[0,0,1],children:p.map((e=>{const{id:n}=e;return(0,E.jsx)(x.A,{id:n,hideAlerts:!0,Wrapper:i.TextSmall,isSidebar:!0,selectedId:null===t||void 0===t?void 0:t.id,onClick:()=>o(e)},n)}))})}),l&&(0,E.jsx)(I.n,{onClose:u,isSubmodal:!1})]})};var R=t(55429),S=t(19186),P=t(46587),F=t(63872),Y=t(47090),U=t(81931),N=t(64973),j=t(58042),z=t(3319);function H(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function O(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?H(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):H(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}var L=t(64156),G=t(94404),J=t(60908);const q=(0,c.default)(i.ModalContent).attrs({width:{base:"80vw"},height:{base:"80vh",min:"80vh"}}).withConfig({displayName:"modal__ModalContent",componentId:"sc-4dwymj-0"})(["box-shadow:0 18px 28px rgba(0,0,0,0.5);"]),K=(0,G.A)(i.Button),V=e=>{let{claim:n,loading:t,error:o,privateKey:a,setPrivateKey:s,selectedSpace:r,setSelectedSpace:c,selectedRoom:u,keyFilename:h,cmd:m,help:g}=e;const p=(0,L.A)("(max-width: 1680px)"),f=(0,A.useMemo)((()=>m||"sudo cat ".concat(h)),[m,h]),y="$ ".concat(f),b=(0,A.useMemo)((()=>g||"Run the command and paste here the key it will give you. If the command doesn&apos;t work out of the box, locate the {keyFilename} file, open it in your favorite text editor, and copy it to your clipboard."),[g]),w=(0,A.useMemo)((()=>p?{base:"100%",max:"600px"}:"60%"),[p]);return r?u?(0,E.jsxs)(E.Fragment,{children:[(0,E.jsx)(d.m,{}),(0,E.jsxs)(i.H3,{textAlign:"center",children:['You are ready to connect your agent in "',u.name,'" of "',r.name,'"']}),(0,E.jsx)(i.Text,{textAlign:"center",children:"Last step is to ensure you're the admin of this agent ;)"}),(0,E.jsx)(i.TextSmall,{textAlign:"center",children:"We've created a file with a random key. 
Can you read it?"}),(0,E.jsx)(l.Ay,{"data-testid":"key-filename-command",commandText:f,width:w,children:y}),(0,E.jsx)(i.Flex,{column:!0,round:!0,background:"successSemi",border:"primary",padding:[2],width:w,children:(0,E.jsxs)(i.TextSmall,{children:[(0,E.jsx)(i.TextSmall,{strong:!0,children:"Tip:"})," ",b]})}),(0,E.jsx)(i.TextInput,{value:a,placeholder:"Paste private key here",onChange:e=>s(e.target.value),containerStyles:{width:w}}),(0,E.jsx)(K,{label:"Claim your agent",onClick:n,disabled:!a||t,isLoading:t,"data-ga":"claiming::claim-agent::modal",payload:{space:null===r||void 0===r?void 0:r.id,room:null===u||void 0===u?void 0:u.id,privateKey:a}})]}):(0,E.jsxs)(E.Fragment,{children:[(0,E.jsx)(d.m,{}),(0,E.jsxs)(i.H3,{textAlign:"center",children:['Select a room in "',r.name,'" for this agent']}),(0,E.jsx)(i.TextBig,{color:"textDescription",textAlign:"center",children:"or create a new one by hitting the [+] button."})]}):(0,E.jsxs)(E.Fragment,{children:[(0,E.jsx)(d.m,{}),(0,E.jsx)(i.H3,{textAlign:"center",children:"Let's connect your Agent"}),(0,E.jsx)(i.Flex,{width:90,justifyContent:"center",children:(0,E.jsx)(i.TextBigger,{color:"textDescription",textAlign:"center",lineHeight:1.5,children:"Select the Space you want this Agent to join or create a new one by hitting the [+] button."})}),(0,E.jsx)(T,{column:!!r,selectedSpace:r,setSelectedSpace:c,showFullname:!0,width:"80%"})]})},X=e=>{let{onClose:n,keyFilename:t,cmd:o,help:a}=e;const{claim:s,selectedSpace:c,setSelectedSpace:l,selectedRoom:d,setSelectedRoom:u,loading:h,error:m,privateKey:g,setPrivateKey:p}=(e=>{const[n,t]=(0,A.useState)(),[o,i]=(0,A.useState)(),[a,s]=(0,A.useState)(""),c=(0,R.A)(null===n||void 0===n?void 0:n.id),l=(0,S.GJ)(),d=window.localNetdataRegistry.mg,[{claiming:u,claimingError:h},m]=(0,r.RJ)(d),g=(0,P.uW)("isAnonymous"),[p,f]=(0,F.A)(),y=(0,r.OS)(),b=(0,N.OS)(),E=(0,j.OS)(),{sendLog:w,isReady:B}=(0,z.A)();return{claim:(0,A.useCallback)((()=>{a&&!u&&c&&d&&l&&!g&&o&&(m({claiming:!0}),(0,U.j)({key:a,token:null===c||void 0===c?void 0:c.token,rooms:[o.id],url:window.envSettings.apiUrl}).then((t=>{let{data:i}=t;if(!i.success)return m({claiming:!1,claimingError:i.message,claimId:null}),f({message:i.message}),void w({feature:"claim-node",isFailure:!0,message:i.message});y(),b(),E(),m(O(O({claiming:!1},i),{},{claimingError:""})),e(),p({header:"Your agent got connected to Netdata",text:"You can see your agent in the room ".concat(o.name," of ").concat(n.name," space.")}),w({feature:"claim-node",isSuccess:!0})})).catch((e=>{var n;const t=null===e||void 0===e||null===(n=e.response)||void 0===n?void 0:n.data,o=(0,Y.o)(null===t||void 0===t?void 0:t.errorMsgKey)||(null===t||void 0===t?void 0:t.errorMessage)||"Something went wrong";m({claiming:!1,claimingError:o,claimId:null}),f({message:o}),w({feature:"claim-node",isFailure:!0,message:o})})))}),[c,a,d,l,g,u,w,B]),selectedSpace:n,setSelectedSpace:t,selectedRoom:o,setSelectedRoom:i,loading:u,error:h,privateKey:a,setPrivateKey:s}})(n);return(0,E.jsx)(i.Modal,{zIndex:80,backdropProps:{backdropBlur:!0},children:(0,E.jsx)(J.Ay,{feature:"ClaimModal",children:(0,E.jsx)(i.Flex,{column:!0,alignItems:"end",gap:3,children:(0,E.jsxs)(q,{tabIndex:0,children:[(0,E.jsxs)(i.ModalHeader,{justifyContent:"between",children:[(0,E.jsx)(i.Flex,{gap:2,alignItems:"center",children:"Connect your Agent to Netdata 
Cloud"}),(0,E.jsx)(i.ModalCloseButton,{onClose:n,testId:"claim-modal-close-button","data-ga":"claiming::close-claim-modal::local"})]}),(0,E.jsxs)(i.ModalBody,{overflow:{vertical:"auto"},padding:[0],height:"100%",column:!1,children:[(0,E.jsx)(i.Collapsible,{background:"sideBar",open:!!c,direction:"horizontal",children:(0,E.jsx)(T,{column:!0,selectedSpace:c,setSelectedSpace:l,background:"sideBarMini",height:"100%"})}),(0,E.jsx)(i.Collapsible,{background:"sideBar",open:!!c,direction:"horizontal",children:(0,E.jsx)(i.Flex,{flex:!0,width:56,column:!0,overflow:{vertical:"auto"},padding:[2,0],children:!!c&&(0,E.jsx)(k,{selectedSpace:c,selectedRoom:d,setSelectedRoom:u})})}),(0,E.jsx)(i.Flex,{alignItems:"center",column:!0,"data-testid":"claim-modal-body",flex:!0,gap:3,justifyContent:"center",padding:[4],children:(0,E.jsx)(V,{selectedSpace:c,selectedRoom:d,setSelectedSpace:l,keyFilename:t,cmd:o,help:a,claim:s,loading:h,error:m,privateKey:g,setPrivateKey:p})})]})]})})})})};function W(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Z(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?W(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):W(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const $={unavailable:"Netdata is not available for this agent.",available:"Netdata is available. Click to claim it and gain the full benefits of Netdata!",disabled:"Netdata is available, but it is disabled, you can change the agent configuration to enable it.",banned:"The agent has been banned from cloud.",offline:"The agent tries to connect to Netdata, but it fails to do so.",online:"The agent is already connected to Netdata :)"},ee=(0,G.A)(i.Button),ne=(te=ee,e=>{const[n,,t,o]=(0,a.A)("claimModal"),[{canBeClaimed:i,cloudStatus:A,keyFilename:c,cmd:l,help:d}]=(0,r.RJ)();return(0,E.jsxs)(E.Fragment,{children:[(0,E.jsx)(s.A,{plain:!0,content:$[A],isBasic:!0,children:(0,E.jsx)("div",{children:(0,E.jsx)(te,Z(Z({label:"Connect",onClick:t},e),{},{disabled:!i,"data-ga":"claiming::open-claim-modal::local"}))})}),n&&(0,E.jsx)(X,{onClose:o,keyFilename:c,cmd:l,help:d})]})});var te},41936(e,n,t){"use strict";t.d(n,{Ay:()=>y,OS:()=>p,RJ:()=>f});var o=t(64467),i=(t(98992),t(54520),t(3949),t(62953),t(96540)),a=t(52035),s=t(34843),r=t(18790),A=t(19186),c=t(46587),l=t(47090),d=t(81931);function u(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function h(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?u(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):u(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const m=(0,r.I)((()=>(0,a.eU)({loaded:!1,loading:!1,cloudStatus:"disabled",canBeClaimed:!1,claimId:null,keyFilename:"",error:"",claiming:!1,claimingError:""}))),g=(0,r.I)((()=>(0,a.eU)(1))),p=()=>{const e=window.localNetdataRegistry.mg,n=(0,s.Xr)(g(e));return(0,i.useCallback)((()=>n((e=>e+1))),[e])},f=()=>{const 
e=window.localNetdataRegistry.mg,[n,t]=(0,s.fp)(m(e));return[n,(0,i.useCallback)((e=>t((n=>h(h({},n),e)),[])))]},y=()=>{const e=(0,A.GJ)(),n=window.localNetdataRegistry.mg,[t,o]=(0,s.fp)(g(n)),[a,r]=f(n),{loading:u}=a,m=(0,c.uW)("isAnonymous");return(0,i.useEffect)((()=>{!u&&n&&e&&(r({loading:!0,nodeId:null,spaceId:null,roomIds:[]}),(0,d.j)().then((e=>{let{data:n}=e;r(h(h({loading:!1,loaded:!0},n),{},{error:""}))})).catch((e=>{var n;const t=null===e||void 0===e||null===(n=e.response)||void 0===n?void 0:n.data;r({loading:!1,loaded:!0,error:(0,l.o)(null===t||void 0===t?void 0:t.errorMsgKey)||(null===t||void 0===t?void 0:t.errorMessage)||"Something went wrong",cloudStatus:"disabled",canBeClaimed:!1,keyFilename:""})})))}),[n,t,e,m]),h(h({},a),{},{checkAgain:o})}},52419(e,n,t){"use strict";t.d(n,{M:()=>o,d:()=>i});t(27495);const o=function(){return/(Community|Early)/.test(arguments.length>0&&void 0!==arguments[0]?arguments[0]:"")?"nightly":"stable"},i={nightly:{description:"Released at most once every 24 hours with fully-tested code that fixes bugs or security flaws, or introduces new features to Netdata. Every nightly release is a candidate for then becoming a stable release.",title:"Nightly"},stable:{description:"Released when a major milestone is reached. Stable releases might be a better choice for those who run Netdata in mission-critical production systems, as updates will come more infrequently.",title:"Stable"}}},23459(e,n,t){"use strict";t.d(n,{A:()=>s});var o=t(42358),i=t(74848);const a="node-status-indicator",s=e=>{let{statusText:n,selected:t,total:s,statusColor:r,isScreenSmall:A}=e;const{statusTextColor:c,indicatorWrapperColor:l,counterColor:d}=r;return(0,i.jsxs)(o.Flex,{alignItems:"center",gap:2,children:[!A&&(0,i.jsx)(o.TextSmall,{"data-testid":"".concat(a,"-text-").concat(n),color:c,children:n}),(0,i.jsx)(o.Flex,{justifyContent:"center",alignItems:"center",padding:[0,1],width:{min:7.5},height:5,background:l,round:!0,gap:A?.2:1,children:t===s?(0,i.jsx)(o.TextSmall,{"data-testid":"".concat(a,"-total-").concat(n),color:d,children:s}):(0,i.jsxs)(i.Fragment,{children:[(0,i.jsx)(o.TextSmall,{strong:!0,"data-testid":"".concat(a,"-selected-").concat(n),color:d,children:t}),(0,i.jsx)(o.TextSmall,{color:d,children:A?"/":"of"}),(0,i.jsx)(o.TextSmall,{"data-testid":"".concat(a,"-total-").concat(n),color:d,children:s})]})})]})}},91100(e,n,t){"use strict";t.d(n,{A:()=>g});t(98992),t(54520),t(3949);var o=t(64467),i=t(80045),a=t(42358),s=t(9962),r=t(19075),A=t(2626),c=t(74848);const l=["critical","warning","error","testPrefix"],d=["id","testPrefix"];function u(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function h(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?u(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):u(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const m=e=>{let{critical:n,warning:t,error:o,testPrefix:r}=e,A=(0,i.A)(e,l);const 
d=(0,s.I)({error:o,testPrefix:r,flavour:n?"error":"disabledError",value:n}),u=(0,s.I)({error:o,testPrefix:r,flavour:t?"warning":"disabledWarning",value:t});return(0,c.jsx)(a.Flex,h(h({},A),{},{children:(0,c.jsx)(a.MasterCard,{"data-testid":"alertsMastercard",pillLeft:d,pillRight:u,size:"small"})}))},g=e=>{let{id:n,testPrefix:t}=e,o=(0,i.A)(e,d);const a=(0,A.Xt)(n),{critical:s,warning:l}=(0,r.AO)(a);return(0,c.jsx)(m,h({"data-testid":"alerts",testPrefix:t,critical:s,warning:l},o))}},96184(e,n,t){"use strict";t.d(n,{A:()=>m,q:()=>h});var o=t(64467),i=(t(98992),t(54520),t(3949),t(81454),t(51510)),a=t(42358),s=t(23459),r=t(71341),A=t(74848);function c(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function l(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?c(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):c(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const d=(0,i.default)(a.Flex).withConfig({displayName:"wrapper__NodesIndicatorWrapper",componentId:"sc-gy5ftg-0"})(["pointer-events:",";cursor:",";opacity:",";"],(e=>{let{disabled:n}=e;return n?"none":"auto"}),(e=>{let{disabled:n}=e;return n?"default":"pointer"}),(e=>{let{disabled:n}=e;return n?"0.7":"1"})),u={live:{counter:0,statusText:"Live"},stale:{counter:0,statusText:"Stale"},offline:{counter:0,statusText:"Offline"},unseen:{counter:0,statusText:"Unseen"}},h=e=>(0,A.jsx)(d,l(l({},e),{},{children:Object.keys(u).map((e=>(0,A.jsx)(s.A,{statusText:u[e].statusText,counter:u[e].counter,statusColor:r.P[e]},e)))})),m=d},52950(e,n,t){"use strict";t.d(n,{A:()=>E});var o=t(64467),i=t(80045),a=(t(98992),t(54520),t(3949),t(62953),t(96540)),s=t(42358),r=t(46587),A=t(99728),c=t(45087),l=t(94404),d=t(18739),u=t(18387),h=t(2652),m=t(44913),g=t(74848);const p=["tooltipProps"];function f(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function y(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?f(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):f(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const b=(0,l.A)(s.Button),E=e=>{let{tooltipProps:n={}}=e,t=(0,i.A)(e,p);const o=(0,r.uW)("isAnonymous"),{value:s}=(0,d.JN)(),{slug:l}=s||{},f=(0,u.Kj)(l),E=(0,A.JT)("billing:Manage"),[w]=(0,m.useStaticCoupon)(),B=(0,h.A)(w),[C,M]=(0,a.useState)();return(0,a.useEffect)((()=>{M(!o&&f&&E)}),[o,f,E]),C?(0,g.jsx)(c.A,y(y({plain:!0,content:"Upgrade to business plan and experience the full power of Netdata!",isBasic:!0},n),{},{children:(0,g.jsx)("div",{children:(0,g.jsx)(b,y({label:"Upgrade Now!",small:!0,isStart:!0,onClick:B,feature:"UpgradeToBusiness","data-testid":"upgradeToBusiness-header"},t))})})):null}},55093(e,n,t){"use strict";t.d(n,{A:()=>g,m:()=>m});t(98992),t(54520),t(3949);var o=t(64467),i=t(80045),a=t(51510),s=t(42358),r=t(74848);const A=["title","width","height"],c=["title","body","iconProps","animate","testId"];function l(e,n){var 
t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function d(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?l(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):l(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const u=(0,a.keyframes)(["from{opacity:0.4;}to{opacity:1;}"]),h=(0,a.default)(s.Icon).withConfig({displayName:"loader__StyledIcon",componentId:"sc-a76ek6-0"})(["width:",";height:",";animation:",";"],(e=>e.width),(e=>e.height),(e=>{let{animate:n}=e;return n?(0,a.css)([""," 1.6s ease-in infinite"],u):""})),m=e=>{let{title:n="Loading",width:t="208px",height:o="177px"}=e,a=(0,i.A)(e,A);return(0,r.jsx)(h,d({name:"netdata",color:"primary",title:n,"data-testid":"loading-logo",width:t,height:o},a))},g=e=>{let{title:n,body:t,iconProps:o,animate:a=!0,testId:A=""}=e,l=(0,i.A)(e,c);return(0,r.jsxs)(s.Flex,d(d({column:!0,height:"100vh",background:"mainBackground",width:"100%",justifyContent:"center",alignItems:"center","data-testid":"loader-container-".concat(A)},l),{},{children:[(0,r.jsx)(m,d(d({},o),{},{animate:a})),n&&(0,r.jsx)(s.H3,{color:"text",margin:[1,0,0],children:n}),t&&(0,r.jsx)(s.Text,{color:"text",margin:[4.5,0,0],children:t})]}))}},32742(e,n,t){"use strict";t.d(n,{A:()=>r});t(98992),t(54520),t(3949);var o=t(64467),i=t(42358),a=t(74848);function s(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const r=e=>(0,a.jsx)(i.Icon,function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?s(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):s(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({name:"netdataPress",color:"success",height:"32px",width:"32px"},e))},98595(e,n,t){"use strict";t.d(n,{A:()=>h,N:()=>d});t(98992),t(54520),t(3949);var o=t(64467),i=t(80045),a=t(51510),s=t(42358),r=t(74848);const A=["disabled","isOpen","toggleOpen","label","children","headerTestId","Header"];function c(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function l(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?c(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):c(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const d=(0,a.default)(s.H4).attrs({padding:[0,2],background:"error"}).withConfig({displayName:"list__DefaultListHeader",componentId:"sc-5df7lw-0"})(["cursor:pointer;pointer-events:",";"],(e=>{let{disabled:n}=e;return n?"none":"auto"})),u=e=>{let{disabled:n,toggleOpen:t,label:o,testid:i,Header:a=d}=e;return(0,r.jsx)(a,{"data-testid":i,onClick:t,opacity:n?"medium":void 
0,disabled:n,children:o})},h=e=>{let{disabled:n,isOpen:t=!1,toggleOpen:o,label:a,children:c,headerTestId:d,Header:h}=e,m=(0,i.A)(e,A);return(0,r.jsxs)(s.Flex,l(l({column:!0},m),{},{children:[(0,r.jsx)(u,{disabled:n,Header:h,toggleOpen:o,label:a,testid:d}),(0,r.jsx)(s.Collapsible,{open:t,children:c})]}))}},93335(e,n,t){"use strict";t.d(n,{N:()=>l,W:()=>d});var o=t(19075),i=t(2626),a=t(87083),s=t(41716),r=t(91100),A=t(74848);const c=e=>{let{critical:n,warning:t,error:o}=e;return(0,A.jsx)(r.A,{critical:n,"data-testid":"tabAlerts",error:o,testPrefix:"tabAlerts-alert",warning:t,margin:[0,0,0,1]})},l=e=>{let{nodeId:n}=e;const t=(0,i.Xt)(n),{critical:a,warning:s}=(0,o.AO)(t);return(0,A.jsx)(c,{critical:a,warning:s})},d=()=>{const e=(0,a.w7)({emptyIfAll:!1}),n=(0,i.YS)(e),t=(0,s.s)("error"),{critical:r,warning:l}=(0,o.AO)(n);return(0,A.jsx)(c,{critical:r,warning:l,error:t})}},77234(e,n,t){"use strict";t.d(n,{x:()=>o});const o={home:"home",overview:"overview",nodes:"nodes",k8s:"k8s",top:"top",logs:"logs",dashboards:"dashboards",dashboard:"dashboard",alerts:"alerts",ar:"ar",events:"events",insights:"insights"}},47413(e,n,t){"use strict";t.d(n,{A:()=>p});var o=t(64467),i=(t(98992),t(54520),t(3949),t(81454),t(96540)),a=t(41344),s=t(1817),r=t(44245),A=t(93335),c=t(42358),l=t(19075),d=t(74848);const u={warning:"warning",critical:"error"},h=e=>{let{alertId:n}=e;const t=(0,l.JL)(n,"status");return u[t]?(0,d.jsx)(c.Flex,{alignSelf:"center",margin:[0,0,0,2],round:1,background:u[t],width:2,height:2}):null};function m(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function g(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?m(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):m(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const p=e=>{const n=(0,a.Zp)(),t=(0,a.RQ)("/spaces/:spaceSlug/rooms/:roomSlug/:type/*"),o=(0,r._F)(),c=(0,r.xK)(),l=(0,r.NU)(),u=t?t.pathnameBase:"/spaces",{pathname:m}=(0,a.zy)(),p=(0,s.A)(e[m]&&m,!0),f=(0,i.useCallback)(((e,t)=>{if(!t)return c(e);const i=o[e-1],a=o[e+1],s=(t,o)=>{const i="".concat(t).concat(o?"/".concat(o):"");n(i),c(e)};return p?s(p):i?s(i.path,i.params):a?s(a.path,a.params):s(u)}),[o,c,u,p]);return[(0,i.useMemo)((()=>o.map((e=>g(g({},e),{},{children:e.id&&"nodes"===e.type?(0,d.jsx)(A.N,{nodeId:e.id}):"alerts"===e.type?(0,d.jsx)(h,{alertId:e.id}):null})))),[o]),(e,n)=>{-1===e&&-1===n||l({sourceIndex:e,destinationIndex:n})},f]}},17178(e,n,t){"use strict";t.d(n,{A:()=>b});t(98992),t(54520),t(3949);var o=t(64467),i=t(96540),a=t(41344),s=t(19186),r=t(24609),A=t(93335),c=t(41716),l=t(93883),d=t(93912),u=t(74848);const h=()=>{const e=(0,c.s)("error"),n=(0,c.s)("updatedAt");return(0,u.jsx)(d.A,{title:"Alerts & Notifications",description:"View raised and configured alerts across your infrastructure.",children:(0,u.jsx)(l.A,{error:e,updatedAt:n})})};var m=t(99728),g=t(39175),p=t(77234);function f(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function y(e){for(var n=1;n<arguments.length;n++){var 
t=null!=arguments[n]?arguments[n]:{};n%2?f(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):f(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const b=()=>{const e=(0,s.r9)(),n=(0,a.RQ)("/spaces/:spaceSlug/rooms/:roomSlug/insights/report-preview/:reportId"),t=(0,m.JT)("dashboard:ReadAll"),o=(e=>{let{path:n,matchReportPreview:t}=e;return(0,i.useMemo)((()=>y(y({[p.x.home]:{id:"home",title:"Home",icon:"room_home",path:"".concat(n,"/home"),exact:!0,dataGa:"view-picker::click-view-home::global-view",testId:"viewPicker-home",tooltip:{title:"Summary",description:"An overview of your infrastructure."}},[p.x.overview]:{id:"overview",title:"Metrics",icon:"room_overview",path:"".concat(n,"/overview"),exact:!0,dataGa:"view-picker::click-view-overview::global-view",testId:"viewPicker-overview",tooltip:{title:"Metrics Explorer",description:"All metrics from all nodes, containers, VMs, SNMP devices, applications, and synthetic checks, in one universal, aggregated, infrastructure-level dashboard."}},[p.x.nodes]:{id:"nodes",title:"Nodes",icon:"nodes_hollow",path:"".concat(n,"/nodes"),exact:!0,dataGa:"view-picker::click-view-nodes::global-view",testId:"viewPicker-nodes",tooltip:{title:"Nodes Explorer",description:"All your infrastructure nodes."}}},!window.envSettings.isAgent&&!window.envSettings.onprem&&{[p.x.k8s]:{id:"k8s",title:"K8s",icon:"serviceKubernetes",path:"".concat(n,"/kubernetes"),exact:!0,dataGa:"view-picker::click-view-kubernetes::global-view",testId:"viewPicker-kubernetes",tooltip:{title:"Kubernetes Explorer",description:"All Kubernetes clusters, pods, containers and workloads."}}}),{},{[p.x.top]:{id:"top",title:"Live",icon:"live",path:"".concat(n,"/top"),exact:!0,dataGa:"view-picker::click-view-fn::global-overview",testId:"viewPicker-fn",tooltip:{title:"Live View",description:"Live, on-demand insights including processes, network connections, database queries, topology maps & more."}},[p.x.logs]:{id:"logs",title:"Logs",icon:"logs",path:"".concat(n,"/logs"),exact:!0,dataGa:"view-picker::click-view-logs::global-overview",testId:"viewPicker-logs",tooltip:{title:"Logs Explorer",description:"Search, filter, and analyze system and application logs."}},[p.x.dashboards]:{id:"dashboards",title:"Dashboards",icon:"dashboard",path:"".concat(n,"/dashboards"),exact:!0,dataGa:"view-picker::click-view-dashboard::global-view",testId:"viewPicker-customDashboards",tooltip:{title:"Custom Dashboards",description:"Create and manage personalized dashboards tailored to your needs."},droppable:!0,droppableProps:{dropArea:!0,dropinDashboards:!0}},[p.x.dashboard]:{id:"dashboard",title:"Dashboard",icon:"dashboard",path:"".concat(n,"/dashboard"),exact:!0,dataGa:"view-picker::click-view-dashboard::global-view",testId:"viewPicker-customDashboard",tooltip:{title:"Custom Dashboard",description:"Edit a personalized dashboard tailored to your needs."},droppable:!0,droppableProps:{dropArea:!0,dashboardId:g.LA}},[p.x.alerts]:{id:"alerts",title:"Alerts",icon:"alarm",path:"".concat(n,"/alerts"),exact:!0,dataGa:"view-picker::click-view-alerts::global-view",testId:"viewPicker-alerts",tooltip:(0,u.jsx)(h,{}),children:(0,u.jsx)(A.W,{})},[p.x.ar]:{id:"ar",title:"Anomalies",icon:"anomaliesLens",path:"".concat(n,"/anomalies"),exact:!0,dataGa:"view-picker::click-view-anomalies::global-view",testId:"viewPicker-anomalies",tooltip:{title:"Anomaly 
Advisor",description:"Machine-learning powered root cause analysis, blast radius detection, and cascading effect tracing across nodes."}},[p.x.events]:{id:"events",title:"Events",icon:"feed",path:"".concat(n,"/events"),exact:!0,dataGa:"view-picker::click-view-feed::global-view",testId:"viewPicker-feed",tooltip:{title:"Activity Feed",description:"Alert transitions and notifications, node connections and disconnections, administrative events."}},[p.x.insights]:{id:"insights",title:"AI Insights",icon:"ai",path:"".concat(n,"/insights"),exact:!t,dataGa:"view-picker::click-view-insights::global-view",testId:"viewPicker-insights",tooltip:{title:"AI Insights and Reports",description:"Automatically investigate, troubleshoot and generate deep dive infrastructure analysis reports using Netdata AI."},iconColor:"primaryAI"}})),[n,t])})({path:e,matchReportPreview:n}),c=(0,r.dg)(),l=(0,s.GJ)();var d,f;return[(0,i.useMemo)((()=>[!c&&!l&&o.home,o.nodes,o.overview,o.top,o.logs,!c&&!window.envSettings.onprem&&o.k8s,!c&&t&&o.dashboards,c&&t&&o.dashboard,o.alerts,o.events,o.ar,o.insights].filter(Boolean)),[e,n,c]),(d=e,f=!c,(0,i.useMemo)((()=>({["".concat(d,"/overview")]:!0,["".concat(d,"/home")]:!0,["".concat(d,"/nodes")]:!0,["".concat(d,"/dashboards")]:!0,["".concat(d,"/dashboard")]:!0,["".concat(d,"/alerts")]:!0,["".concat(d,"/anomalies")]:!0,["".concat(d,"/top")]:!0,["".concat(d,"/logs")]:!0,["".concat(d,"/events")]:!0,["".concat(d,"/insights")]:!0})),[d,f]))]}},9962(e,n,t){"use strict";t.d(n,{I:()=>s});t(98992),t(54520),t(3949);var o=t(64467);function i(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function a(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?i(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):i(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const s=e=>{let{error:n,testPrefix:t,onClick:o,flavour:i,value:s}=e;return a(a({"data-testid":t?"".concat(t,"-").concat(i):i,flavour:i},o&&{onClick:o}),{},{text:n?"-":"".concat(s)})}},90930(e,n,t){"use strict";t.d(n,{A:()=>a});var o=t(42358),i=t(74848);const a=()=>(0,i.jsxs)(o.Flex,{column:!0,width:{max:"200px"},gap:2,children:[(0,i.jsxs)(o.TextMicro,{children:[(0,i.jsx)(o.TextMicro,{strong:!0,children:"Live:"})," Node is actually collecting and streaming metrics to Netdata"]}),(0,i.jsxs)(o.TextMicro,{children:[(0,i.jsx)(o.TextMicro,{strong:!0,children:"Stale:"})," Node is currently offline and not streaming metrics to Netdata. 
It can show historical data from a parent node"]}),(0,i.jsxs)(o.TextMicro,{children:[(0,i.jsx)(o.TextMicro,{strong:!0,children:"Offline:"})," Node is currently offline, not streaming metrics to Netdata and not available in any parent node"]}),(0,i.jsxs)(o.TextMicro,{children:[(0,i.jsx)(o.TextMicro,{strong:!0,children:"Unseen:"})," Node has never been connected to Netdata; it is claimed, but no successful connection was established"]})]})},37617(e,n,t){"use strict";t.d(n,{A:()=>A});t(27495);var o=t(96540),i=t(41344),a=t(24609),s=t(30403),r=t(92318);const A=()=>{const{pathname:e}=(0,i.zy)(),n=(0,a.bq)(),{onboardingView:t}=r.A;return{isOnboardingPath:/\/onboarding(\/|$)/.test(e),path:(0,o.useMemo)((()=>n?"/spaces/".concat(n,"/rooms/").concat(s.mL,"/").concat(t):null),[n,t])}}},40531(e,n,t){"use strict";t.d(n,{A:()=>C});var o=t(64467),i=t(80045),a=(t(98992),t(54520),t(3949),t(62953),t(96540)),s=t(42358),r=t(24609),A=t(99728),c=t(46587),l=t(18739),d=t(2652);const u={Business:!0,Pro:!0,Homelab:!0,Community:!1};var h=t(74891),m=t(44913),g=t(94404),p=t(74848);const f=["getShouldUpgrade","onClick"],y=["getShouldUpgrade"];function b(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function E(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?b(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):b(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const w=(0,g.A)((0,h.A)(s.Button)),B=e=>{let{getShouldUpgrade:n,onClick:t}=e,o=(0,i.A)(e,f);const{loaded:s,value:r}=(0,l.JN)(),c=(0,A.JT)("billing:Manage"),[h]=(0,m.useStaticCoupon)(),g=(0,d.A)(h),y=(0,a.useCallback)((()=>{g(),"function"===typeof t&&t()}),[g]),b=(0,a.useMemo)((()=>!!s&&("function"===typeof n?n(r):!(null===r||void 0===r||!r.class)&&!function(){return u[(arguments.length>0&&void 0!==arguments[0]?arguments[0]:{}).class]}(r))),[s,r,n]),B=(0,a.useMemo)((()=>b?c?"Upgrade your plan to use this feature":"You don't have sufficient permissions to upgrade the plan":null),[c,b]),C=(0,a.useMemo)((()=>E({label:"Upgrade now!",onClick:y,tooltip:B,disabled:!c,noWrapper:!0,feature:"UpgradeToBusiness"},o)),[y,B,c,o]);return b?(0,p.jsx)(w,E({},C)):null},C=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},{getShouldUpgrade:n}=e,t=(0,i.A)(e,y);const o=(0,c.uW)("isAnonymous"),a=(0,r.dg)();return o||a?null:(0,p.jsx)(B,E({getShouldUpgrade:n},t))}},61337(e,n,t){"use strict";t.d(n,{A:()=>i});var o=t(99728);const i=e=>{let{permission:n,spaceId:t,children:i}=e;return(0,o.JT)(n,t)?i:null}},51571(e,n,t){"use strict";t.d(n,{A:()=>l});t(98992),t(54520),t(3949);var o=t(64467),i=t(80045),a=t(42358),s=t(99728),r=t(74848);const A=["ifForbidden","permission","spaceId","Component","ref"];function c(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const l=e=>{let{ifForbidden:n="disabled",permission:t,spaceId:l,Component:d=a.Button,ref:u}=e,h=(0,i.A)(e,A);const m=(0,s.JT)(t,l);return"hide"!==n||m?(0,r.jsx)(d,function(e){for(var n=1;n<arguments.length;n++){var 
t=null!=arguments[n]?arguments[n]:{};n%2?c(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):c(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({ref:u,disabled:"disabled"===n&&!m||h.disabled},h)):null}},1174(e,n,t){"use strict";t.d(n,{A:()=>l});t(98992),t(54520),t(3949);var o=t(64467),i=t(80045),a=t(99728),s=t(74848);const r=["Component","ifForbidden","permission","children","spaceId"];function A(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function c(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?A(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):A(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const l=e=>{let{Component:n,ifForbidden:t="disabled",permission:o,children:A,spaceId:l}=e,d=(0,i.A)(e,r);const u=(0,a.JT)(o,l);return"hide"!==t||u?"function"===typeof A?A(u):n?(0,s.jsx)(n,c(c({},d),{},{disabled:"disabled"===t&&!u||d.disabled})):A:null}},49676(e,n,t){"use strict";t.d(n,{A:()=>o});const o=t(51510).default.div.withConfig({displayName:"resizableContainer__ResizableContainer",componentId:"sc-1v3pbaz-0"})(["resize:both;overflow:hidden;position:relative;pointer-events:auto;isolation:isolate;&::-webkit-resizer{display:none;}"])},7066(e,n,t){"use strict";t.d(n,{A:()=>a});var o=t(51510),i=t(42358);const a=(0,o.default)(i.IconButton).attrs({icon:"resize_handler",padding:[0],position:"absolute",bottom:0,right:0,hoverColor:"textDescription",width:"14px",height:"14px"}).withConfig({displayName:"resizeHandler__ResizeHandler",componentId:"sc-1cq5w9q-0"})(["&&{cursor:nwse-resize;pointer-events:none;}"])},27587(e,n,t){"use strict";t.d(n,{A:()=>d});var o=t(64467),i=(t(98992),t(54520),t(3949),t(81454),t(96540)),a=t(42358),s=t(49916),r=t(74848);function A(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function c(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?A(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):A(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const l={closeMenuOnSelect:!1,backspaceRemovesValue:!1,isClearable:!1,blurInputOnSelect:!0,captureMenuScroll:!0,isMulti:!0},d=e=>{let{selectedValue:n,onChange:t,formatOptions:o,filterValues:A,placeholder:d="search..."}=e;const u=(0,s.DL)(),h=(0,i.useMemo)((()=>u.map((e=>{let{id:n,name:t}=e;return c({label:t,value:n},o&&o({id:n,name:t}))}))),[u,o]),m=(0,i.useMemo)((()=>h.filter((e=>{let{label:t,value:o}=e;return n.includes(o)||A&&A({label:t,value:o})}))),[h,n,A]),g=(0,i.useCallback)((e=>{const n=e.map((e=>{let{value:n}=e;return n}));t(n)}),[t]);return(0,r.jsx)(a.Select,c(c({placeholder:d},l),{},{options:h,value:m,onChange:g}))}},54835(e,n,t){"use strict";t.d(n,{A:()=>Ve});t(98992),t(54520),t(3949);var 
o=t(64467),i=t(42358),a=t(63950),s=t.n(a),r=t(80045),A=t(96540),c=t(74848);const l=["children"],d=(0,A.createContext)(),u=()=>(0,A.use)(d),h=e=>{let{children:n}=e,t=(0,r.A)(e,l);return(0,c.jsx)(d,{value:t,children:n})};t(72577),t(62953);var m=t(92255),g=t(80696),p=t(64587),f=t(42539),y=t(65408);t(81454);const b={untilTurnedOff:"untilTurnedOff",oneHour:"oneHour",sixHours:"sixHours",twelveHours:"twelveHours",oneDay:"oneDay",custom:"custom"},E={startDate:new Date,endDate:void 0,customDuration:b.untilTurnedOff,recurrenceValue:y.iX.value,byDayValue:void 0,byMonthValue:void 0,byMonthDayValue:void 0,intervalNumberValue:"1",intervalUnitValue:y.os.value,endOptionValue:y.Pu.never,repeatOnMonthlyOption:y.Qo.byMonthDay,repeatOnYearlyOption:y.Qo.byMonthDay,recurrenceCount:"1"},w={untilTurnedOff:"Until turned off",oneHour:"After 1 hour",sixHours:"After 6 hours",twelveHours:"After 12 hours",oneDay:"After 1 day",custom:"Custom"},B=Object.keys(b).map((e=>({value:e,label:w[e]})));function C(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function M(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?C(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):C(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const T=e=>{const n=new Date(e),t=n.getUTCFullYear(),o=n.getUTCMonth()+1,i=n.getUTCDate(),a=n.getUTCHours(),s=n.getUTCMinutes();return(0,m.w$)(t,o,i,a,s)},I=e=>{if(e)return Array.isArray(e)?e.length?e:void 0:[e]};function v(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function _(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?v(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):v(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Q=e=>{let{isEdit:n,rrule:t,onDatesChange:o,onRuleChange:i,initialState:a={}}=e;const{options:s}=(e=>{try{return(0,m.YI)(e)}catch(n){return{}}})(t),r=function(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},t=arguments.length>2?arguments[2]:void 0;const o=M(M({},E),n);if("object"!==typeof e)return o;const i=(null===e||void 0===e?void 0:e.interval)>1||e.count||e.until?y.Th.value:m.p3.FREQUENCIES[e.freq],a=null!==e&&void 0!==e&&e.until?y.Pu.on:null!==e&&void 0!==e&&e.count?y.Pu.after:y.Pu.never;return M(M(M(M(M(M(M(M(M(M({},o),{},{startDate:new Date(e.dtstart),recurrenceValue:i,intervalNumberValue:e.interval,endOptionValue:a,intervalUnitValue:m.p3.FREQUENCIES[e.freq]},t?{customDuration:b.custom}:{}),e.byweekday?{byDayValue:e.byweekday}:{}),e.bymonth?{byMonthValue:e.bymonth}:{}),e.bymonthday?{byMonthDayValue:e.bymonthday}:{}),e.bysetpos?{bySetPosValue:e.bysetpos}:{}),e.until?{recurrenceEndDate:new 
Date(e.until)}:{}),e.count?{recurrenceCount:e.count}:{}),e.freq===m.p3.MONTHLY?{repeatOnMonthlyOption:e.bysetpos?y.Qo.bySetPos:y.Qo.byMonthDay}:{}),e.freq===m.p3.YEARLY?{repeatOnYearlyOption:e.bysetpos?y.Qo.bySetPos:y.Qo.byMonthDay}:{})}(s,a,n),[c,l]=(0,A.useState)(r),[d,u]=(0,A.useState)(""),[h,w]=(0,A.useState)((0,g.R)());(0,A.useEffect)((()=>{if(c.recurrenceValue===y.iX.value)i(null);else{const e=I(c.byMonthDayValue),n=I(c.bySetPosValue),t=I(c.byMonthValue),o=_(_(_(_(_({freq:c.recurrenceValue===y.Th.value?y.wk[c.intervalUnitValue]:y.wk[c.recurrenceValue],dtstart:T(c.startDate),interval:Number(c.intervalNumberValue)},c.endOptionValue===y.Pu.on&&c.recurrenceEndDate?{until:T(c.recurrenceEndDate)}:{}),c.endOptionValue===y.Pu.after&&c.recurrenceCount?{count:Number(c.recurrenceCount)}:{}),[y.T6.week,y.T6.month,y.T6.year].includes(c.intervalUnitValue)&&c.byDayValue?{byweekday:c.byDayValue}:{}),[y.T6.month,y.T6.year].includes(c.intervalUnitValue)?_(_({},e?{bymonthday:e}:{}),n?{bysetpos:n}:{}):{}),y.T6.year&&t?{bymonth:t}:{}),a=new m.p3(o);i(a.toString()),u(a.toText())}}),[c,i,u]);const B=(0,A.useCallback)((e=>{let{value:n}=e;const t=n===y.Th.value,o=![y.iX.value,y.Th.value].includes(n);n===y.iX.value&&u(""),l((e=>_(_({},e),{},{recurrenceValue:n},o?{intervalNumberValue:"1",recurrenceCount:void 0,recurrenceEndDate:void 0,endOptionValue:E.endOptionValue}:t?{intervalNumberValue:e.intervalNumberValue||E.intervalNumberValue,recurrenceCount:e.recurrenceCount||E.recurrenceCount,endOptionValue:e.endOptionValue||E.endOptionValue}:{})))}),[l,u]),C=(0,A.useCallback)((e=>{l((n=>_(_({},n),{},{byMonthValue:e})))}),[l]),v=(0,A.useCallback)((e=>{l((n=>_(_({},n),{},{byMonthDayValue:e})))}),[l]),Q=(0,A.useCallback)((e=>{l((n=>_(_({},n),{},{bySetPosValue:Array.isArray(e)?e:[e]})))}),[l]),D=(0,A.useCallback)((e=>{l((n=>_(_({},n),{},{byDayValue:e})))}),[l]),x=(0,A.useCallback)((e=>{let{target:n}=e;l((e=>_(_({},e),{},{intervalNumberValue:n.value})))}),[l]),k=(0,A.useCallback)((e=>{let{value:n}=e;l((e=>_(_({},e),{},{intervalUnitValue:n,byDayValue:E.byDayValue,byMonthDayValue:E.byMonthDayValue,bySetPosValue:E.bySetPosValue,byMonthValue:E.byMonthValue})))}),[l]),R=(0,A.useCallback)((e=>{l((n=>{const t=e===y.Pu.on?{recurrenceEndDate:n.recurrenceEndDate||new Date}:{},o=e!==y.Pu.after?{recurrenceCount:1}:{};return _(_(_({},n),{},{endOptionValue:e},t),o)}))}),[l]),S=(0,A.useCallback)((e=>{l((n=>_(_({},n),{},{startDate:new Date(e)})))}),[l]),P=(0,A.useCallback)((e=>{l((n=>_(_({},n),{},{endDate:new Date(e)})))}),[l]),F=(0,A.useCallback)((e=>{l((n=>{const t=e===b.untilTurnedOff?void 0:((e,n)=>{if(!e)return e;switch(n){case b.untilTurnedOff:return;case b.oneHour:return(0,f.W)(e,{hours:1});case b.sixHours:return(0,f.W)(e,{hours:6});case b.twelveHours:return(0,f.W)(e,{hours:12});case b.oneDay:return(0,f.W)(e,{days:1});default:return(0,f.W)(e,{minutes:10})}})(n.startDate,e);return _(_({},n),{},{customDuration:e,endDate:t})}))}),[l]),Y=(0,A.useCallback)((e=>{l((n=>_(_({},n),{},{repeatOnMonthlyOption:e,byMonthDayValue:E.byMonthDayValue,bySetPosValue:E.bySetPosValue,byDayValue:E.byDayValue})))}),[l]),U=(0,A.useCallback)((e=>{l((n=>_(_({},n),{},{repeatOnYearlyOption:e,byMonthValue:E.byMonthValue,byMonthDayValue:E.byMonthDayValue,bySetPosValue:E.bySetPosValue,byDayValue:E.byDayValue})))}),[l]),N=(0,A.useCallback)((e=>{l((n=>_(_({},n),{},{recurrenceEndDate:new 
Date(e)})))}),[l]),j=(0,A.useCallback)((e=>{let{target:n}=e;l((e=>_(_({},e),{},{recurrenceCount:n.value})))}),[l]),{timezone:z}=(0,p.$j)(),H=(0,A.useMemo)((()=>{if(!c.startDate||!c.endDate||!z)return!1;try{const e=e=>{var n;return null===(n=new Intl.DateTimeFormat("en",{timeZone:z,timeZoneName:"shortOffset"}).formatToParts(e).find((e=>"timeZoneName"===e.type)))||void 0===n?void 0:n.value};return e(new Date(c.startDate))!==e(new Date(c.endDate))}catch(e){return!1}}),[c.startDate,c.endDate,z]);return(0,A.useEffect)((()=>{if(c.customDuration===b.custom){const e=c.startDate;w(e),c.startDate>=c.endDate&&l((n=>_(_({},n),{},{endDate:e})))}}),[c.startDate,c.endDate,c.customDuration,w,l]),(0,A.useEffect)((()=>{"function"===typeof o&&o({start:c.startDate,end:c.endDate})}),[c.startDate,c.endDate,o]),{startDate:c.startDate,endDate:c.endDate,customDuration:c.customDuration,recurrenceValue:c.recurrenceValue,byDayValue:c.byDayValue,byMonthValue:c.byMonthValue,byMonthDayValue:c.byMonthDayValue,bySetPosValue:c.bySetPosValue,intervalNumberValue:c.intervalNumberValue,intervalUnitValue:c.intervalUnitValue,endOptionValue:c.endOptionValue,repeatOnMonthlyOption:c.repeatOnMonthlyOption,repeatOnYearlyOption:c.repeatOnYearlyOption,recurrenceEndDate:c.recurrenceEndDate,recurrenceCount:c.recurrenceCount,ruleText:d,utcOffsetDiff:H,minStartDate:(0,g.R)(),minEndDate:h,onStartDateChange:S,onEndDateChange:P,onCustomDurationChange:F,onRecurrenceChange:B,onByMonthValueChange:C,onByMonthDayValueChange:v,onBySetPosValueChange:Q,onByDayValueChange:D,onIntervalNumberChange:x,onIntervalUnitChange:k,onEndOptionValueChange:R,onRepeatOnMonthlyOptionValueChange:Y,onRepeatOnYearlyOptionValueChange:U,onRecurrenceEndDateChange:N,onRecurrenceCountChange:j}},D=(0,A.createContext)(),x=()=>(0,A.use)(D),k=e=>{let{isEdit:n,rrule:t,onDatesChange:o,onRuleChange:i,initialState:a,children:s}=e;const r=Q({isEdit:n,rrule:t,onDatesChange:o,onRuleChange:i,initialState:a});return(0,c.jsx)(D,{value:r,children:s})};var R=t(51510),S=t(9618),P=t(6463);const F=()=>{const{customDuration:e,onCustomDurationChange:n}=x(),t=(0,A.useMemo)((()=>B.find((n=>n.value===e))),[e]),o=(0,A.useCallback)((e=>{let{value:t}=e;n(t)}),[n]);return(0,c.jsx)(i.Select,{value:t,options:B,onChange:o})};var Y=t(74217),U=t(427);function N(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function j(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?N(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):N(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const z=(0,R.default)(i.Box).withConfig({displayName:"dateInput__InputsGrid",componentId:"sc-130xqt-0"})(["display:grid;grid-template-columns:repeat(",",1fr);gap:8px;"],(e=>{let{isSinglePicker:n}=e;return n?"1":"2"})),H=(0,S.R)(),O=(0,P.o)((0,f.W)(H,{years:1})),L={height:"24px",background:"inputBg",border:{side:"all",color:"inputBorder"},padding:[0,2],round:.5,justifyContent:"start",fluid:!0},G=e=>{let{withDuration:n,labelProps:t={},tooltipContent:o,startDateLabel:a="Start date",endDateLabel:s="End 
date"}=e;const{offset:r}=(0,U.bO)(),{startDate:l,endDate:d,customDuration:u,minStartDate:h,minEndDate:m,onStartDateChange:g,onEndDateChange:p}=x(),{isSinglePicker:f,startDateValue:y,endDateValue:E}=(0,A.useMemo)((()=>j({isSinglePicker:!n,startDateValue:{singleDate:l}},d?{endDateValue:{singleDate:d}}:{})),[l,d,n]),w=(0,A.useMemo)((()=>{var e;return null!==E&&void 0!==E&&E.singleDate?"string"===typeof E.singleDate?E.singleDate:null===E||void 0===E||null===(e=E.singleDate)||void 0===e?void 0:e.toISOString():""}),[null===E||void 0===E?void 0:E.singleDate]);return(0,c.jsxs)(z,{"data-testid":"scheduler-dates-inputs-container",isSinglePicker:f,children:[(0,c.jsxs)(i.Flex,{column:!0,gap:1,children:[(0,c.jsx)(i.Text,j(j({color:"menuItem"},t),{},{children:a})),(0,c.jsx)(Y.A,{isSinglePicker:!0,hideCustomPeriods:!0,values:y,minDate:h,maxDate:O,utc:r,onChange:g,isPlaying:!1,accessorProps:L,padding:[4,0],width:"auto",accessorTooltipContent:o||"Select start date and time"})]}),f?null:(0,c.jsxs)(i.Flex,{column:!0,gap:1,children:[(0,c.jsx)(i.Text,j(j({color:"menuItem"},t),{},{children:s})),(0,c.jsxs)(i.Flex,{alignItems:"center",gap:2,children:[(0,c.jsx)(F,{}),u===b.custom?(0,c.jsx)(Y.A,{isSinglePicker:!0,hideCustomPeriods:!0,values:E,minDate:m,maxDate:O,utc:r,onChange:p,isPlaying:!1,accessorProps:j(j({},L),{},{width:"100%"}),padding:[4,0],width:"auto",accessorTooltipContent:o||"Select end date and time"},w):null]})]})]})};var J=t(99574),q=t(77148),K=t(7134);const V=()=>{const{startDate:e,endDate:n}=x(),t=(0,A.useMemo)((()=>e&&n&&(0,K.Vj)((0,K.Ds)((0,J.c)(e),(0,J.c)(n)))||"\u221e"),[e,n]);return(0,c.jsxs)(i.Flex,{alignItems:"center",gap:2,children:[(0,c.jsx)(q.A,{}),(0,c.jsxs)(i.TextSmall,{whiteSpace:"nowrap",children:["Duration: ",t]}),(0,c.jsx)(q.A,{})]})};function X(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function W(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?X(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):X(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Z=()=>{const{intervalNumberValue:e,intervalUnitValue:n,onIntervalNumberChange:t,onIntervalUnitChange:o}=x(),a=parseInt(e,10),s=(0,A.useMemo)((()=>a>1?y.FJ.map((e=>W(W({},e),{},{label:e.labelPlural}))):y.FJ),[a]),r=(0,A.useMemo)((()=>s.find((e=>{let{value:t}=e;return t===n}))),[n,s]);return(0,c.jsxs)(c.Fragment,{children:[(0,c.jsx)(i.TextInput,{width:"55px",type:"number",min:1,step:1,value:e,onChange:t,size:"small",round:1}),(0,c.jsx)(i.Select,{value:r,options:s,onChange:o,menuPlacement:"auto",styles:{minWidth:"70px"}})]})};t(89463);var $=t(74891);const ee=["isSelected"];function ne(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function te(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?ne(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):ne(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const 
oe=(0,R.default)(i.Flex).attrs((e=>{let{isSelected:n}=e;return te({width:"30px",height:"30px",round:"50%",alignItems:"center",justifyContent:"center",background:n?"primary":"inputBg",cursor:"pointer"},(0,r.A)(e,ee))})).withConfig({displayName:"circle__Container",componentId:"sc-1qhl36y-0"})([""]),ie=(0,$.A)(oe),ae=e=>{let{isMulti:n,value:t,label:o,selectedItems:a=[],buttonProps:s={},onItemClick:r}=e;const l=a.includes(t),d=l?"mainBackground":"text",u=(0,A.useCallback)((()=>{const e=n?a.includes(t)?a.filter((e=>e!==t)):[...a,t]:[t];r(e)}),[n,t,a,r]);return(0,c.jsx)(ie,te(te({isSelected:l},s),{},{onClick:u,children:(0,c.jsx)(i.TextSmall,{strong:!0,color:d,children:o})}))};function se(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function re(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?se(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):se(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Ae=e=>{let{value:n=[],items:t=[],isMulti:o,onChange:a}=e;return(0,c.jsx)(i.Flex,{alignItems:"center",gap:1,children:t.map((e=>(0,c.jsx)(ae,re(re({},e),{},{isMulti:o,selectedItems:n,onItemClick:a}),e.value)))})};function ce(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function le(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?ce(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):ce(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const de=[{label:"MO",description:"Monday",value:m.p3.MO.weekday},{label:"TU",description:"Tuesday",value:m.p3.TU.weekday},{label:"WE",description:"Wednesday",value:m.p3.WE.weekday},{label:"TH",description:"Thursday",value:m.p3.TH.weekday},{label:"FR",description:"Friday",value:m.p3.FR.weekday},{label:"SA",description:"Saturday",value:m.p3.SA.weekday},{label:"SU",description:"Sunday",value:m.p3.SU.weekday}],ue=[],he=de.map((e=>le(le({},e),{},{buttonProps:{textTransform:"",strong:!0,tooltip:e.description}}))),me=de.map((e=>({value:e.value,label:e.description}))),ge=e=>{let{flavour:n,value:t=ue,onChange:o,isMulti:a,isDisabled:s}=e;const r=(0,A.useMemo)((()=>"select"===n?a?me.filter((e=>t.includes(e.value))):me.find((e=>t.includes(e.value)))||"":he.filter((e=>t.includes(e.value))).map((e=>{let{value:n}=e;return n}))),[a,t,n]),l=(0,A.useCallback)((e=>{const n=Array.isArray(e)?e.map((e=>"object"===typeof e?e.value:e)):"object"===typeof e?[e.value]:[e];o(n)}),[o]);return"select"===n?(0,c.jsx)(i.Select,{value:r,options:me,isMulti:a,onChange:l,isDisabled:s,menuPlacement:"auto",styles:{minWidth:"120px"}}):(0,c.jsx)(Ae,{value:r,items:he,isMulti:a,onChange:l})};function pe(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function fe(e){for(var n=1;n<arguments.length;n++){var 
t=null!=arguments[n]?arguments[n]:{};n%2?pe(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):pe(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const ye=()=>{const{labelProps:e}=u(),{byDayValue:n,onByDayValueChange:t}=x();return(0,c.jsxs)(i.Flex,{column:!0,gap:1,children:[(0,c.jsx)(i.Text,fe(fe({color:"menuItem"},e),{},{children:"Repeat on"})),(0,c.jsx)(ge,{value:n,isMulti:!0,onChange:t})]})},be=["option","item","onChange","children","TextComponent","labelProps"];function Ee(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function we(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Ee(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Ee(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Be=e=>{let{option:n,item:t,onChange:o,children:a,TextComponent:s=i.Text,labelProps:l={}}=e,d=(0,r.A)(e,be);const{value:u,label:h}=t,m=n===u,g=(0,A.useCallback)((()=>{o(u)}),[u,o]);return(0,c.jsxs)(i.Flex,we(we({alignItems:"center",gap:2},d),{},{children:[(0,c.jsx)(i.RadioButton,{checked:m,onChange:g}),(0,c.jsx)(s,we(we({color:"menuItem"},l),{},{children:h})),"function"===typeof a?a({isDisabled:!m}):a]}))},Ce=e=>{let{value:n,options:t,onChange:o}=e;return[(0,A.useMemo)((()=>Array.isArray(n)?n?t.filter((e=>n.includes(e.value))):"":t.find((e=>e.value===n))||""),[t,n]),(0,A.useCallback)((e=>{Array.isArray(e)?o(e.map((e=>e.value))):o(e.value)}),[o])]},Me=e=>{let{isYearly:n,isDisabled:t,styles:o}=e;const{byMonthValue:a,byMonthDayValue:s,onByMonthValueChange:r,onByMonthDayValueChange:A}=x(),[l,d]=Ce({value:a,options:y.s$,onChange:r}),[u,h]=Ce({value:s,options:y.QK,onChange:A});return(0,c.jsxs)(i.Flex,{alignItems:"center",gap:2,children:[n?(0,c.jsx)(i.Select,{value:l,options:y.s$,onChange:d,isDisabled:t,styles:o}):null,(0,c.jsx)(i.Select,{value:u,options:y.QK,onChange:h,isDisabled:t,styles:o})]})};function Te(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Ie(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Te(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Te(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const 
ve=Object.values(y.Sk),_e=e=>{let{isYearly:n,isDisabled:t,styles:o}=e;const{labelProps:a}=u(),{byMonthValue:s,bySetPosValue:r,byDayValue:A,onByMonthValueChange:l,onBySetPosValueChange:d,onByDayValueChange:h}=x(),[m,g]=Ce({value:r,options:ve,onChange:d}),[p,f]=Ce({value:s,options:y.s$,onChange:l});return(0,c.jsxs)(i.Flex,{alignItems:"center",gap:2,children:[(0,c.jsx)(i.Select,{value:m,options:ve,onChange:g,isDisabled:t,menuPlacement:"auto",styles:o}),(0,c.jsx)(ge,{flavour:"select",value:A,onChange:h,isDisabled:t}),n?(0,c.jsxs)(c.Fragment,{children:[(0,c.jsx)(i.Text,Ie(Ie({color:"menuItem"},a),{},{children:"of"})),(0,c.jsx)(i.Select,{value:p,options:y.s$,onChange:f,isDisabled:t,styles:o})]}):null]})};function Qe(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function De(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Qe(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Qe(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const xe=()=>{const{labelProps:e}=u(),{repeatOnMonthlyOption:n,onRepeatOnMonthlyOptionValueChange:t}=x(),o=(0,A.useMemo)((()=>De(De({},e),{},{whiteSpace:"nowrap"})),[e]);return(0,c.jsxs)(i.Flex,{column:!0,gap:1,children:[(0,c.jsx)(i.Text,De(De({color:"menuItem"},e),{},{children:"Repeat on"})),(0,c.jsx)(Be,{height:"28px",option:n,item:y.wc.byMonthDay,onChange:t,labelProps:o,children:e=>{let{isDisabled:n}=e;return(0,c.jsx)(Me,{isDisabled:n,styles:{minWidth:"120px"}})}}),(0,c.jsx)(Be,{height:"28px",option:n,item:y.wc.bySetPos,onChange:t,labelProps:o,children:e=>{let{isDisabled:n}=e;return(0,c.jsx)(_e,{isDisabled:n})}})]})};function ke(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Re(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?ke(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):ke(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Se=()=>{const{labelProps:e}=u(),{repeatOnYearlyOption:n,onRepeatOnYearlyOptionValueChange:t}=x(),o=(0,A.useMemo)((()=>Re(Re({},e),{},{whiteSpace:"nowrap"})),[e]);return(0,c.jsxs)(i.Flex,{column:!0,gap:1,children:[(0,c.jsx)(i.Text,Re(Re({color:"menuItem"},e),{},{children:"Repeat on"})),(0,c.jsx)(Be,{height:"28px",option:n,item:y.Td.byMonthDay,onChange:t,labelProps:o,children:e=>{let{isDisabled:n}=e;return(0,c.jsx)(Me,{isYearly:!0,isDisabled:n,styles:{minWidth:"120px"}})}}),(0,c.jsx)(Be,{height:"28px",option:n,item:y.Td.bySetPos,onChange:t,labelProps:o,children:n=>{let{isDisabled:t}=n;return(0,c.jsx)(_e,{isYearly:!0,isDisabled:t,styles:{minWidth:"120px"},labelProps:e})}})]})},Pe=()=>{const{intervalUnitValue:e}=x();return 
e===y.T6.week?(0,c.jsx)(ye,{}):e===y.T6.month?(0,c.jsx)(xe,{}):e===y.T6.year?(0,c.jsx)(Se,{}):null},Fe=(0,S.R)(),Ye=(0,P.o)((0,f.W)(Fe,{years:1})),Ue={height:"28px",background:"inputBg",border:{side:"all",color:"inputBorder"},padding:[0,2],round:1,justifyContent:"start"},Ne=e=>{let{date:n,onChange:t}=e;const{offset:o}=(0,U.bO)();return(0,c.jsx)(Y.A,{isSinglePicker:!0,values:{singleDate:n?new Date(n):new Date},minDate:(0,g.R)(),maxDate:Ye,utc:o,onChange:t,isPlaying:!1,accessorProps:Ue,padding:[4,0],width:"auto",accessorTooltipContent:"Select end date and time"})};function je(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function ze(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?je(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):je(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const He=e=>{let{labelProps:n={}}=e;const{localeDateString:t,localeTimeString:o,utcOffset:a}=(0,p.$j)(),{endOptionValue:s,recurrenceCount:r,recurrenceEndDate:l,onEndOptionValueChange:d,onRecurrenceEndDateChange:u,onRecurrenceCountChange:h}=x(),m=(0,A.useMemo)((()=>l?"".concat(t(new Date(l))," ").concat(o(new Date(l),{secs:!1})," UTC").concat(a):"specific date"),[l,t,o,a]);return(0,c.jsxs)(i.Flex,{column:!0,gap:2,children:[(0,c.jsx)(i.Text,ze(ze({color:"menuItem"},n),{},{children:"Ends"})),(0,c.jsxs)(i.Flex,{column:!0,gap:2,children:[(0,c.jsx)(Be,{height:"28px",option:s,item:y.u7.never,onChange:d}),(0,c.jsx)(Be,{height:"28px",option:s,item:y.u7.on,onChange:d,children:e=>{let{isDisabled:t}=e;return t?(0,c.jsx)(i.Text,ze(ze({color:"menuItem"},n),{},{children:m})):(0,c.jsx)(Ne,{date:l,onChange:u})}}),(0,c.jsx)(Be,{height:"28px",option:s,item:y.u7.after,onChange:d,children:e=>{let{isDisabled:t}=e;return(0,c.jsxs)(c.Fragment,{children:[(0,c.jsx)(i.TextInput,{"data-testid":"rrule-prop-count",width:15,value:r,onChange:h,type:"number",min:1,step:1,size:"small",disabled:t}),(0,c.jsx)(i.Text,ze(ze({color:"menuItem"},n),{},{children:"occurrence(s)"}))]})}})]})]})};function Oe(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Le(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Oe(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Oe(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Ge=()=>{const{labelProps:e}=u(),{recurrenceValue:n,ruleText:t,onRecurrenceChange:o}=x(),a=(0,A.useMemo)((()=>y.ZT.find((e=>e.value===n))||""),[n]),s=n===y.Th.value;return(0,c.jsxs)(i.Flex,{"data-testid":"scheduler-recurrence-container",width:"100%",column:!0,gap:4,children:[(0,c.jsxs)(i.Flex,{column:!0,gap:2,children:[(0,c.jsxs)(i.Flex,{height:"20px",alignItems:"center",gap:1,children:[(0,c.jsx)(i.Text,Le(Le({color:"menuItem"},e),{},{children:"Recurrence 
pattern"})),t?(0,c.jsx)(i.Pill,{flavour:"neutral",hollow:!0,size:"small",children:t}):null]}),(0,c.jsxs)(i.Flex,{alignItems:"center",gap:2,children:[(0,c.jsx)(i.Select,{"data-testid":"scheduler-recurrence-selector",value:a,options:y.ZT,onChange:o,menuPlacement:"auto"}),s?(0,c.jsx)(Z,{}):null]})]}),s?(0,c.jsxs)(c.Fragment,{children:[(0,c.jsx)(Pe,{labelProps:e}),(0,c.jsx)(He,{labelProps:e})]}):null]})},Je=()=>{const{utcOffsetDiff:e}=x();return e?(0,c.jsxs)(i.Flex,{gap:2,padding:[2],background:"warningSemi",border:{side:"all",color:"warning"},round:!0,children:[(0,c.jsx)(i.Icon,{name:"warning_triangle",color:"warning"}),(0,c.jsx)(i.TextSmall,{color:"warning",children:"A UTC offset difference exists between the selected dates due to Daylight Saving Time (DST)."})]}):null};function qe(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Ke(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?qe(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):qe(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Ve=e=>{let{isEdit:n,rrule:t,startDate:o,endDate:a,onDatesChange:r,withDuration:A,labelProps:l={},onRuleChange:d=s(),initialState:u={},containerProps:m={},dateInputProps:g={}}=e;return(0,c.jsx)(i.Flex,Ke(Ke({"data-testid":"scheduler-component-container",column:!0,gap:4},m),{},{children:(0,c.jsx)(h,{labelProps:l,children:(0,c.jsxs)(k,{isEdit:n,rrule:t,onDatesChange:r,onRuleChange:d,initialState:Ke(Ke({},u),{},{startDate:o||new Date,endDate:a}),children:[(0,c.jsx)(G,Ke({withDuration:A,labelProps:l},g)),(0,c.jsx)(Je,{}),A?(0,c.jsx)(V,{}):null,(0,c.jsx)(i.Flex,{column:!0,flex:"grow",gap:4,round:!0,border:{side:"all",color:"border"},padding:[4],children:(0,c.jsx)(Ge,{labelProps:l})})]})})}))}},77148(e,n,t){"use strict";t.d(n,{A:()=>l});t(98992),t(54520),t(3949);var o=t(64467),i=t(80045),a=t(42358),s=t(74848);const r=["vertical","color"];function A(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function c(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?A(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):A(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const l=e=>{let{vertical:n,color:t="borderSecondary"}=e,o=(0,i.A)(e,r);return(0,s.jsx)(a.Box,c(c({as:"hr",height:n?"100%":"1px"},n?{}:{width:"100%"}),{},{sx:{borderWidth:n?"0px 0px 0px 1px":"1px 0px 0px 0px",borderColor:t,borderStyle:"solid"}},o))}},44926(e,n,t){"use strict";t.d(n,{t:()=>r});t(98992),t(54520),t(3949);var o=t(64467),i=t(51510),a=t(42358);function s(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const r=(0,i.default)(a.Flex).attrs((e=>function(e){for(var n=1;n<arguments.length;n++){var 
t=null!=arguments[n]?arguments[n]:{};n%2?s(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):s(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({padding:[3,3,0],width:"100%",background:"modalHeaderBackground"},e))).withConfig({displayName:"tabs__TabHeader",componentId:"sc-1ramyo4-0"})([""])},93883(e,n,t){"use strict";t.d(n,{A:()=>c});t(62953);var o=t(96540),i=t(3941),a=t(42358),s=t(4617),r=t(427),A=t(74848);const c=e=>{let{text:n,updatedAt:t,error:c}=e;const[,l]=(0,o.useState)(),{locale:d}=(0,r.bO)(),u=((e,n)=>{if(!e)return"";const t=new Date(e);return t.getTime()?(0,s.A)(t,new Date,n):""})(t,d);return(0,i.A)((()=>l(Math.random())),1e3),n||c||u?(0,A.jsxs)(a.Flex,{column:!0,gap:2,children:[n&&(0,A.jsx)(a.TextBig,{color:"tooltipText",children:n}),c&&(0,A.jsxs)(a.Flex,{alignItems:"center",gap:2,children:[(0,A.jsx)(a.Icon,{width:14,height:12,color:"text",name:"warning_triangle"}),(0,A.jsxs)(a.TextBig,{children:["Error: ",u?"Data not updated":"No data"]})]}),u&&(0,A.jsxs)(a.TextBig,{children:[(0,A.jsx)(a.TextBig,{color:"tooltipText",children:"Last updated: "}),u]})]}):null}},80269(e,n,t){"use strict";t.d(n,{y:()=>d});t(27495),t(25440),t(98992),t(54520),t(81454),t(37550),t(62953);var o=t(96540),i=t(42358),a=t(64091),s=t(40298);const r=(0,t(51510).default)(i.Select).withConfig({displayName:"styled__StyledSelect",componentId:"sc-18k0ifb-0"})(["width:100%;"]);t(71517),t(11379),t(93777),t(14190),t(12359),t(86097),t(17273),t(27415),t(19929),t(37583),t(55122),t(20230),t(57268),t(79733),t(25509),t(65223),t(60321),t(41927),t(11632),t(64377),t(66771),t(12516),t(68931),t(52514),t(35694),t(52774),t(49536),t(21926),t(94483),t(16215);const A=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"name";return e.length?"string"===typeof e[0]?[...new Set(e)]:[...new Map(e.map((e=>[e[n],e]))).values()]:[]};var c=t(74848);const l=[],d=e=>{let{invitations:n,setInvitations:t}=e;const[d,u]=(0,o.useState)(l),[h,m]=(0,o.useState)(""),[g,p]=(0,o.useState)(""),f=(0,o.useCallback)((()=>p("")),[p]),y=(0,s.Vt)(),b=(0,o.useCallback)((e=>{m(e.toLowerCase())}),[m]),E=(0,o.useCallback)((()=>{u(l),t(l)}),[u,t]),w=(0,o.useCallback)((e=>{let{emails:n=l,invitations:o=l,isEmailValid:i}=e;const a=A(n).filter((e=>!y.includes(e)));if(u(a),!i)return void p("Invalid email");const{error:s}=(e=>{let{invitationEmails:n=[],emails:t=[]}=e;return{error:n.some((e=>t.includes(e)))?1===t.length?"Email already invited":"One or more emails have already been invited":void 0}})({invitationEmails:y,emails:n});if(s)return void p(s);const r=A(o);t(r),m(""),f()}),[d,y,t,u,m,f,p]),B=(0,o.useCallback)((e=>(0,a.B9)(e)&&!d.includes(e)),[d]),C=(0,o.useCallback)((()=>{h&&w({emails:[...d,h],invitations:[...n,{email:h,name:h.split("@")[0]}],isEmailValid:B(h)})}),[d,h,n,w,B]),M=(0,o.useCallback)((e=>{w({isEmailValid:!0,emails:e.map((e=>e.value)),invitations:e.map((e=>({email:e.value,name:e.value.split("@")[0]})))})}),[w]),T=(0,o.useCallback)((e=>{const t=e.clipboardData.getData("Text").toLowerCase().replace(/ /g,",").replace(/,,/g,",").split(",").filter((e=>B(e)))||l;w({emails:[...d,...t],invitations:[...n,...t.map((e=>({email:e,name:e.split("@")[0]})))],isEmailValid:t.length>0}),e.preventDefault()}),[d,n,B,w]),I=(0,o.useCallback)((e=>{if(h)switch(f(),e.key){case"Enter":case"Tab":case",":case" 
":w({emails:[...d,h],invitations:[...n,{email:h,name:h.split("@")[0]}],isEmailValid:B(h)}),e.preventDefault()}}),[d,h,n,B,w]);return(0,c.jsxs)(i.Flex,{justifyContent:"between",column:!0,onPaste:T,children:[(0,c.jsx)(r,{components:{DropdownIndicator:null},inputValue:h,isClearable:!0,isMulti:!0,menuIsOpen:!1,onBlur:C,onChange:M,onInputChange:b,onKeyDown:I,onClear:E,placeholder:"Enter an email and hit enter",value:d.map((e=>{return{label:n=e,value:n};var n})),autoFocus:!0}),g&&(0,c.jsx)(i.Text,{color:"error",children:g})]})}},83864(e,n,t){"use strict";t.d(n,{d:()=>F});var o=t(64467),i=(t(98992),t(54520),t(3949),t(81454),t(62953),t(96540)),a=t(42358),s=t(41514),r=t(12724),A=t(51900),c=t(27587),l=t(79748),d=t(80269),u=t(24609),h=t(49916),m=t(40298),g=t(30403),p=t(32788),f=t(29263),y=t(47410),b=t(76238),E=t(74848);const w=e=>{let{id:n,onDelete:t}=e;const o=(0,m.cW)(n,"email"),s=(0,i.useCallback)((()=>t({email:o})),[o,t]);return(0,E.jsxs)(a.Flex,{justifyContent:"between",alignItems:"center",children:[(0,E.jsxs)(a.Flex,{gap:4,children:[(0,E.jsx)(a.Icon,{color:"text",name:"check"}),(0,E.jsx)(a.Text,{children:o})]}),(0,E.jsx)(a.Button,{flavour:"borderless",icon:"trashcan",onClick:s})]})};var B=t(63872),C=t(64091),M=t(99728),T=t(63936),I=t(33821),v=t(56523),_=t(94404),Q=t(3319),D=t(60908);function x(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function k(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?x(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):x(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const R=e=>{let{email:n}=e;return(0,C.B9)(n)},S=(0,_.A)(a.Button),P={header:"Invitations",text:"Invitations successfully sent!"},F=e=>{let{onClose:n,isSubmodal:t=!1}=e;const{id:o,name:C,slug:_}=(0,u.ap)(),x=(0,h.WW)(),[F,Y]=(0,i.useState)(x),[U,N]=(0,i.useState)([]),[j,z]=(0,i.useState)([]),[H,O]=(0,i.useState)(),{sendLog:L,isReady:G}=(0,Q.A)(),{recordMilestone:J,defaultMilestones:q}=(0,v.A)(),[K,V,X,W]=(0,m.gg)(o),[,Z]=(0,B.A)(),$=(0,i.useCallback)((e=>{const{header:t,text:o}=e||P,i=(0,r.UI)({header:t,text:o,success:!0}),a=j.filter(R).map((e=>{let{email:n}=e;return n})).join(",");(0,s.H)("invite","invite-sent","".concat(H,"::").concat(a,"::").concat(F.join(",")),"","","invite-sent"),A.A.success(i,{context:"manageInvitations"}),J(q.firstTeamMemberInvited),n(),G&&L({isSuccess:!0},!0)}),[L,G,J]),ee=(0,i.useCallback)((async()=>{const e=j.filter(R).map((e=>{let{email:n,name:t}=e;return{email:n,name:t,role:H,roomIDs:F}})),n="".concat(window.envSettings.cloudUrl,"/spaces/").concat(_,"/join-space");X(e,n,{onSuccess:$,onError:e=>{Z(e),L({isFailure:!0,error:e.message},!0)}})}),[K,j,_,X,$,Z,L]),ne=(0,M._s)(),te=(0,i.useCallback)((e=>n=>{let{email:t}=n;e&&V(e),z(j.filter((e=>e.email!==t))),N(U.filter((e=>e.email!==t)))}),[j,U,V,z,N]),oe=(0,i.useCallback)((()=>{Y([])}),[Y]),ie="member"===H;return(0,E.jsx)(p.GO,{onClose:n,closeOnClickOutside:!1,children:(0,E.jsxs)(D.Ay,{feature:"ManageInvitationsModal",children:[(0,E.jsx)(f.z,{onClose:n,isSubmodal:t,title:"Invite Users"}),(0,E.jsxs)(y.U,{children:["Invite users to\xa0",C]}),(0,E.jsxs)(p.Yv,{children:[(0,E.jsx)(b.dE,{children:"Send invitations to your team"}),(0,E.jsx)(b.BZ,{children:"TIP: You can send more 
invitations at once, separate each with a comma."}),(0,E.jsx)(d.y,{invitations:j,setInvitations:z}),(0,E.jsx)("br",{}),(0,E.jsx)(b.dE,{children:"Rooms"}),(0,E.jsxs)(a.Flex,{alignItems:"center",justifyContent:"between",margin:[1,0,2],children:[(0,E.jsx)(a.TextSmall,{children:"Choose one or more rooms you'd like to invite users to."}),!!F.length&&(0,E.jsx)(a.Button,{onClick:oe,padding:[0],flavour:"borderless","data-ga":"rooms-clear",label:"Clear",small:!0,children:"Clear"})]}),(0,E.jsx)(a.Box,{"data-testid":"invite-selectRoom",children:(0,E.jsx)(c.A,k({selectedValue:F,onChange:Y},ie?{formatOptions:e=>{let{name:n}=e;return{isDisabled:n===g.Q8}},filterValues:e=>{let{label:n}=e;return n===g.Q8}}:{}))}),(0,E.jsx)("br",{}),(0,E.jsx)(b.dE,{children:"Role"}),(0,E.jsxs)(b.BZ,{children:["Choose a role for invited user."," ",(0,E.jsx)(l.A,{href:I.S0,target:"_blank",rel:"noopener noreferrer",Component:a.TextSmall,children:"Learn more"})]}),(0,E.jsx)(a.Box,{"data-testid":"invite-selectRole",children:(0,E.jsx)(T.A,{availableRoles:ne,dataGA:"invite-to-space",dataTestId:"invite-selectRole",onChange:e=>{O(e.target.value)},value:H})}),(0,E.jsx)(b.fh,{children:(0,E.jsx)(S,{label:"Send",onClick:ee,disabled:0===j.length||!H,flavour:"hollow",isLoading:W,"data-ga":"manage-invitations-modal::click-send::modal-footer"})}),(0,E.jsx)(a.H5,{margin:[2,0,0],children:"Invitations awaiting response"}),(0,E.jsx)(a.Flex,{column:!0,children:K.length>0?K.map((e=>(0,E.jsx)(w,{onDelete:te(e),id:e},e))):(0,E.jsxs)(b.au,{children:[(0,E.jsx)("br",{}),(0,E.jsx)(b.dE,{children:"You haven't invited any users yet."})]})})]})]})})}},76238(e,n,t){"use strict";t.d(n,{BZ:()=>A,au:()=>r,dE:()=>a,fh:()=>s});var o=t(51510),i=t(42358);const a=(0,o.default)(i.H5).withConfig({displayName:"styled__StyledH5",componentId:"sc-ghsyz5-0"})(["display:flex;align-items:center;"]),s=o.default.div.withConfig({displayName:"styled__FormRow",componentId:"sc-ghsyz5-1"})(["width:100%;display:flex;flex-flow:row no-wrap;justify-content:flex-end;margin-top:",";"],(0,i.getSizeBy)(2)),r=o.default.div.withConfig({displayName:"styled__StyledUserInvitationEmptyListItem",componentId:"sc-ghsyz5-2"})(["display:flex;flex-flow:column nowrap;align-items:center;"]),A=(0,o.default)(i.TextSmall).withConfig({displayName:"styled__StyledSecondaryText",componentId:"sc-ghsyz5-3"})(["margin:2px 0 8px;"])},99722(e,n,t){"use strict";t.d(n,{A:()=>H});var o=t(80045),i=t(64467),a=(t(98992),t(54520),t(3949),t(81454),t(62953),t(96540)),s=t(51510),r=t(42358),A=t(42660),c=t(74848);const l=["label","icon"];function d(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function u(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?d(Object(t),!0).forEach((function(n){(0,i.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):d(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const 
h=(0,s.default)(r.Flex).attrs((e=>u({alignItems:"center",gap:2,padding:[2,3.5],cursor:"pointer"},e))).withConfig({displayName:"menuItem__StyledFlex",componentId:"sc-1xfbolf-0"})(["&:hover{background-color:",";}"],(0,r.getColor)("mainBackground")),m=e=>{let{label:n,icon:t}=e,i=(0,o.A)(e,l);return(0,c.jsxs)(h,u(u({},i),{},{children:[t?(0,c.jsx)(r.Icon,{name:t,color:"text"}):null,(0,c.jsx)(r.Text,{children:n})]}))};var g=t(6084),p=t(6304),f=t(46587),y=t(23947),b=t(90657),E=(t(9391),t(32788)),w=t(29263),B=t(44926),C=t(78069),M=t(60908);function T(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function I(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?T(Object(t),!0).forEach((function(n){(0,i.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):T(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const v={flex:"grow"},_={flex:"1",height:"100%",overflow:"auto"},Q=e=>{let{onClose:n}=e;const[t,o]=(0,a.useState)(!1),[i,s]=(0,a.useState)(!1),[A,l]=(0,a.useState)({}),{handleChangeTab:d,activeTabIndex:u,tabsByName:h,tabs:m}=(0,C.A)(),g=(0,f.qO)(void 0,{shouldPersist:!0,onSuccess:n}),p=(0,a.useCallback)((()=>{o(!0),g(A).finally((()=>o(!1)))}),[A]),y=(0,a.useMemo)((()=>m.map((e=>{const{label:t,testId:o,Component:i}=h[e];return(0,c.jsx)(r.Tab,{label:t,"data-testid":"userProfileModal-".concat(o),children:(0,c.jsx)(M.Ay,{tab:e,children:(0,c.jsx)(i,I(I({},"profile"===e&&{setFormState:l,setFormValid:s}),{},{onClose:n}))})},e)}))),[m]);return(0,c.jsx)(E.GO,{onClose:n,width:{max:"60vw",base:180},children:(0,c.jsxs)(M.Ay,{feature:"UserSettings",children:[(0,c.jsx)(w.z,{onClose:n,title:"Settings",children:"profile"===m[u]&&(0,c.jsx)(r.Button,{disabled:!i,label:"Save",onClick:p,isLoading:t,loadingLabel:"saving..."})}),(0,c.jsx)(r.Tabs,{flex:"1",height:"100%",overflow:"hidden",selected:u,onChange:d,TabContent:E.Yv,TabsHeader:B.t,tabsProps:v,tabContentProps:_,children:y})]})})},D=(0,s.default)(r.Flex).attrs({column:!0,round:.5,border:{side:"all",color:"border"}}).withConfig({displayName:"dropdown__Dropdown",componentId:"sc-w0ej6i-0"})(["box-shadow:0 4px 4px rgba(0,0,0,0.25);"]);var x=t(39175),k=t(94404),R=t(74891);const S=["name"];function P(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function F(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?P(Object(t),!0).forEach((function(n){(0,i.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):P(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Y=(0,k.A)(m),U=(0,R.A)(g.A),N=(0,R.A)(r.Button),j=(0,s.default)(Y).attrs((e=>F(F({},e.hasSeparator?{border:{side:"top",color:"border"}}:{}),e))).withConfig({displayName:"userControl__StyledMenuItemWithLog",componentId:"sc-n4ebn8-0"})([""]),z={"Sign In":e=>{let{isAnonymous:n}=e;return n},"User Settings":e=>{let{isAgent:n,isAnonymous:t}=e;return n||!t},"Sign Out":e=>{let{isAnonymous:n}=e;return!n},default:()=>!0},H=e=>{let{dropdownBackground:n="darkBackground"}=e;const 
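/* H: the user-control widget. It reads avatarURL/name/isAnonymous, builds the User Settings / Sign In / Sign Out menu (filtered through the z visibility rules by isAgent/isAnonymous), and renders either a sign-in button or the avatar that toggles the dropdown Layer. */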
t=(0,f.uW)("avatarURL"),i=(0,f.uW)("name"),s=(0,f.uW)("isAnonymous"),[l,,d,u]=(0,p.A)(),h=(0,y.tN)(),m=(0,b.A)(),{handleOpenProfileModal:g,handleCloseProfileModal:E,isProfileModalOpen:w}=(0,C.A)(),B=(0,a.useMemo)((()=>[{name:"User Settings",icon:"user",onClick:()=>{u(),g()},hasSeparator:!1,testid:"userControl-settings"},{name:"Sign In",icon:"sign_in",onClick:()=>{u(),m()},hasSeparator:!0,testid:"userControl-signOut"},{name:"Sign Out",icon:"switch_off",onClick:()=>{u(),h()},hasSeparator:!0,testid:"userControl-signOut"}].filter((e=>(z[e.name]||z.default)({isAgent:x.Ay,isAnonymous:s})))),[s,x.Ay]);return(0,c.jsxs)(A.A,{children:[!x.Ay&&s?(0,c.jsx)(N,{icon:"sign_in",onClick:d,"data-ga":"sidebar-sign-in::click-dropdown::global-view","data-testid":"sign-in-dropdown",iconSize:"medium",tooltip:"Sign In",tooltipProps:{align:"right"}}):(0,c.jsx)(U,{src:t||"","data-testid":"userControl-userAvatar",onClick:d,title:"",tooltip:{title:i||"User",description:"Edit your account settings and manage your notifications"},tooltipProps:{align:"right"}}),l&&(0,c.jsx)(r.Layer,{margin:[5,14],position:"bottom-left",onClickOutside:u,onEsc:u,children:(0,c.jsx)(D,{width:{min:50},background:n,children:B.map((e=>{let{name:n}=e,t=(0,o.A)(e,S);return(0,c.jsx)(a.Fragment,{children:(0,c.jsx)(j,F({label:n,padding:[3,4],payload:{description:"User menu - Click ".concat(n)}},t),n)},n)}))})}),w&&(0,c.jsx)(Q,{onClose:E})]})}},78069(e,n,t){"use strict";t.d(n,{A:()=>jn});t(98992),t(54520),t(62953);var o=t(96540),i=t(31141),a=t(46587),s=t(42358),r=t(89841),A=t(6084),c=t(23947),l=t(6304);var d=t(64467),u=t(80045),h=(t(3949),t(58582)),m=t(74848);const g=["value","isValid","setIsValid","onChange","label","validators","hint"];function p(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const f="User name cannot exceed ".concat(40," characters"),y=e=>{const n=e.length<=40;return(0,r.H)(n,f)},b=e=>{let{value:n,isValid:t,setIsValid:i,onChange:a,label:A,validators:c=[],hint:l}=e,f=(0,u.A)(e,g);const b=(0,r.k)([y,...c]),[E,w]=(0,o.useState)("");return(0,o.useEffect)((()=>{const e=b(n),o=e.isValid,a=(0,h.W)(e);!t&&o?i(!0):t&&!o&&i(!1),w(a||"")}),[t,n]),(0,m.jsx)(s.TextInput,function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?p(Object(t),!0).forEach((function(n){(0,d.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):p(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({label:A||" ",name:"userName",placeholder:"Enter the user's name",value:n,onChange:a,hint:l,error:!t&&E},f))};var E=t(18739),w=t(24609),B=t(86706),C=t(6818);const M=[e=>(0,r.H)(!!e,"Name should not be empty")],T=(0,C.o)((e=>{let{email:n,isLastMember:t,spaceName:o,inputValue:i="",onInputChange:a,children:r}=e;return(0,m.jsxs)(s.Flex,{column:!0,gap:2,children:[(0,m.jsxs)(s.TextBig,{children:["You are about to delete your account at ",(0,m.jsx)("strong",{children:"Netdata"}),". 
All data related to your account will be deleted."]}),t&&(0,m.jsxs)(m.Fragment,{children:[(0,m.jsxs)(s.TextBig,{children:[(0,m.jsx)(s.TextBig,{strong:!0,children:o})," space will be deleted since you are the last member."]}),r]}),(0,m.jsx)(s.TextBig,{strong:!0,children:"This cannot be undone."}),(0,m.jsxs)(s.TextBig,{children:["To confirm, type ",(0,m.jsxs)(s.TextBig,{strong:!0,children:['"',n,'"']})," in the field below."]}),(0,m.jsx)(s.TextInput,{value:i,onChange:a,error:!0,hideErrorMessage:!0})]})})),I=e=>{let{setFormValid:n,setFormState:t}=e;const i=(0,a.uW)("name"),[r,d,u,h]=(0,s.useInputValue)({value:i,maxChars:40}),[g,p]=(0,o.useState)(!1),[f,y]=(0,o.useState)(""),[C,,I,v]=(0,l.A)(),[_,Q]=(0,l.A)(),{value:D}=(0,E.JN)(),x=(0,w.ap)(),k=1===(0,B.Gi)().length,R=(0,a.uW)("email"),S=(0,a.uW)("avatarURL"),P=(0,o.useMemo)((()=>_||f!==R),[_,f,R]),F=(0,o.useCallback)((e=>{let{target:n}=e;return y(n.value)}),[y]);(0,o.useEffect)((()=>{h&&n(g)}),[h,g]),(0,o.useEffect)((()=>{t({name:r})}),[r]);const Y=(0,c.z2)();return(0,m.jsxs)(m.Fragment,{children:[(0,m.jsx)(s.Flex,{"data-testid":"user-profile-container",flex:"grow",alignItems:"center",justifyContent:"center",children:(0,m.jsxs)(s.Flex,{width:{base:"100%",max:"420px"},alignItems:"center",column:!0,gap:3,children:[(0,m.jsx)(A.A,{src:S||"","data-testid":"userProfile-avatar",width:30,height:30}),(0,m.jsx)(s.Flex,{width:"100%",column:!0,margin:[4,0,0,0],children:(0,m.jsx)(b,{"data-testid":"userProfile-username",value:r,onChange:d,isValid:g,setIsValid:p,validators:M,fieldIndicator:u,instantFeedback:"positiveFirst",isDirty:h,textAlign:"center"})}),(0,m.jsx)(s.Text,{"data-testid":"userProfile-email",color:"tooltipText",textAlign:"center",children:R}),(0,m.jsx)(s.Flex,{justifyContent:"center",margin:[4,0,0,0],children:(0,m.jsx)(s.Button,{flavour:"hollow",danger:!0,onClick:I,label:"Delete account","data-ga":"user-profile-settings::click-delete::global-view","data-testid":"userProfile-deleteAccount-button"})})]})}),C&&(0,m.jsx)(s.ConfirmationDialog,{confirmLabel:_?"Deleting...":"Yes, delete","data-ga":"delete-account-dialog","data-testid":"deleteAccountDialog",handleConfirm:()=>{Q(),Y()},handleDecline:v,message:(0,m.jsx)(T,{email:R,isLastMember:k,spaceName:x.name,currentPlan:D,inputValue:f,onInputChange:F}),title:"Delete Account",isConfirmDisabled:P,isConfirmLoading:_,isDeclineDisabled:_})]})};var v=t(24285),_=t(70140),Q=t(63872);function D(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function x(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?D(Object(t),!0).forEach((function(n){(0,d.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):D(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const k=()=>{const e=(0,o.useRef)(),[n,,t,i]=(0,l.A)(),[a,s]=(0,v.tF)("spacesList"),r=(0,_.A)(),[A,c]=(0,o.useState)(r),[d]=(0,Q.A)(),u=(0,o.useCallback)((e=>{n||(t(),s(e),i())}),[n,s,t,i]),h=(0,o.useCallback)((e=>{u(x(x({},A),{},{spaceIds:e}))}),[A,u]),m=(0,o.useCallback)((e=>{let{id:n,color:t}=e;u(x(x({},A),{},{colors:x(x({},A.colors),{},{[n]:t})}))}),[A,u]),g=(0,o.useCallback)((e=>{var n;u(x(x({},A),{},{colors:x(x({},A.colors),{},{[e]:null===(n=r.colors)||void 0===n?void 
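/* onColorReset: restore this space's color to the previously saved value from r.colors; onColorClear (next) unsets it entirely. */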
0:n[e]})}))}),[r,A,u]),p=(0,o.useCallback)((e=>{u(x(x({},A),{},{colors:x(x({},A.colors),{},{[e]:void 0})}))}),[A,u]),f=(0,o.useMemo)((()=>!!a),[a]),y=(0,o.useCallback)((()=>{t(),s(null),i(),d({header:"Success",text:"All settings have been successfully deleted."})}),[s,t,i]);return(0,o.useEffect)((()=>{c(r)}),[r,c]),(0,o.useEffect)((()=>(e.current=Date.now(),()=>{e.current=0})),[]),{state:A,onOrderChange:h,onColorChange:m,onColorReset:g,onColorClear:p,onDelete:y,canDelete:f,loading:n}};t(81454);var R=t(43375),S=t(43627),P=t(74979),F=t(51510),Y=t(7542);function U(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function N(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?U(Object(t),!0).forEach((function(n){(0,d.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):U(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const j=(0,F.default)(s.Flex).attrs((e=>N({width:4,height:4,round:!0,border:{side:"all",color:"border"},cursor:"pointer",alignItems:"center",justifyContent:"center"},e))).withConfig({displayName:"styled__StyledColorBox",componentId:"sc-1iqu0oz-0"})(["background-color:",";"],(e=>e.background)),z=(0,F.default)(s.Icon).attrs((e=>N({color:(0,Y.T4)(e.backgroundColor)?["neutral","black"]:["neutral","white"]},e))).withConfig({displayName:"styled__StyledIcon",componentId:"sc-1iqu0oz-1"})([""]);var H=t(45087);t(8872);const O={green:["green50","green100","green150","green180"],red:["red50","red100","red150","red180"],yellow:["yellow50","yellow100","yellow150","yellow180"],blue:["blue50","blue100","blue150","blue180"],purple:["purple50","purple100","purple150","purple180"],violet:["violet50","violet100","violet150","violet180"]};var L=t(74891);const G=(0,L.A)(j),J=e=>{let{value:n,theme:t,onChange:i}=e;const a=(0,o.useMemo)((()=>Object.entries(O).reduce(((e,n)=>{let[o,i]=n;return[...e,...i.map((e=>(0,s.getColor)([o,e])({theme:t})))]}),[])),[t]),r=(0,o.useCallback)((()=>{const e=Math.floor(Math.random()*a.length),n=a[e];i({target:{value:n}})}),[a,i]);return(0,m.jsx)(G,{border:"none",background:n,onClick:r,tooltip:"Change to random color",children:(0,m.jsx)(z,{name:"refresh",backgroundColor:n})})},q=(0,o.memo)(J),K=e=>{let{theme:n,onChange:t}=e;return(0,m.jsx)(s.Flex,{gap:2,children:Object.entries(O).map((e=>{let[o,i]=e;return(0,m.jsx)(s.Flex,{column:!0,gap:2,children:i.map((e=>{const i=(0,s.getColor)([o,e])({theme:n});return(0,m.jsx)(j,{border:"none",background:i,onClick:()=>t({target:{value:i}})},e)}))},o)}))})},V=(0,o.memo)(K),X=e=>{let{value:n,onChange:t}=e;const i=(0,F.useTheme)(),[a,r]=(0,o.useState)();return(0,o.useEffect)((()=>{r((0,Y.Bi)(n)?null:"Invalid color value.")}),[n]),(0,m.jsxs)(s.Flex,{column:!0,gap:2,padding:[4],children:[(0,m.jsxs)(s.Flex,{gap:2,children:[(0,m.jsx)(q,{value:n,theme:i,onChange:t}),(0,m.jsx)(s.TextInput,{height:7,value:n,onChange:t,error:a})]}),(0,m.jsx)(s.TextSmall,{children:"Choose from default colors."}),(0,m.jsx)(V,{theme:i,onChange:t})]})},W=e=>{let{value:n,tooltip:t,onChange:i}=e;const 
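/* W: a color swatch that, when clicked, opens a Drop anchored to the swatch ref and containing the color picker X above. */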
a=(0,o.useRef)(),[r,A,,c]=(0,l.A)(!1);return(0,m.jsxs)(m.Fragment,{children:[(0,m.jsx)(H.A,{align:"bottom",content:t,children:(0,m.jsx)(s.Flex,{ref:a,children:(0,m.jsx)(j,{background:n,onClick:A})})}),a.current&&r?(0,m.jsx)(s.Drop,{width:60,target:a.current,align:{top:"bottom",right:"right"},background:"modalBackground",margin:[2,0,0],round:!0,onClickOutside:c,onEsc:c,children:(0,m.jsx)(X,{value:n,onChange:i})}):null]})},Z=e=>{let{value:n,onChange:t}=e;const i=(0,F.useTheme)(),a=(0,o.useMemo)((()=>{var e;return null===i||void 0===i||null===(e=i.colors)||void 0===e?void 0:e.spaceIdle}),[i]);return(0,m.jsx)(W,{value:n||a,onChange:t,tooltip:"Change color"})},$=e=>{let{id:n,color:t,onColorChange:i,onColorReset:a,onColorClear:s}=e;const r=(0,_.A)(),A=(0,o.useMemo)((()=>{var e;return(null===(e=r.colors)||void 0===e?void 0:e[n])!=t}),[r,t]),c=(0,o.useCallback)((e=>{i({id:n,color:e.target.value})}),[n,i]),l=(0,o.useCallback)((()=>{a(n)}),[n,a]),d=(0,o.useCallback)((()=>{s(n)}),[n,s]);return{isDirty:A,hasColor:!!t,onChange:c,onReset:l,onClear:d}};function ee(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function ne(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?ee(Object(t),!0).forEach((function(n){(0,d.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):ee(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const te=(0,L.A)(s.Icon),oe=e=>{let{id:n,color:t,onColorChange:o,onColorReset:i,onColorClear:a}=e;const{attributes:r,listeners:A,setNodeRef:c,transform:l,transition:d}=(0,S.gl)({id:n}),u={transform:P.Ks.Transform.toString(l),transition:d},{name:h}=(0,w.ns)(n),{isDirty:g,hasColor:p,onChange:f,onReset:y,onClear:b}=$({id:n,color:t,onColorChange:o,onColorReset:i,onColorClear:a});return(0,m.jsx)("div",ne(ne({ref:c,style:u},r),{},{children:(0,m.jsxs)(s.Flex,{gap:1,alignItems:"center",children:[(0,m.jsx)(s.Icon,ne(ne({name:"dots_2x3",color:"textLite"},A),{},{cursor:"grab"})),(0,m.jsx)(Z,{value:t,onChange:f}),(0,m.jsx)(s.Text,{children:h}),g?(0,m.jsx)(te,{name:"reload",color:"text",cursor:"pointer",onClick:y,tooltip:"Reset background color to previous value",noWrapper:!0}):null,p?(0,m.jsx)(te,{name:"x",color:"text",cursor:"pointer",onClick:b,tooltip:"Remove background color",noWrapper:!0}):null]},n)}))},ie=e=>{let{spaceIds:n,colors:t,onOrderChange:i,onColorChange:a,onColorReset:s,onColorClear:r}=e;const[A,c]=(0,o.useState)(n),[l,d]=(0,o.useState)(!1),u=(0,R.FR)((0,R.MS)(R.AN),(0,R.MS)(R.uN,{coordinateGetter:S.JR})),h=(0,o.useCallback)((e=>{const{active:n,over:t}=e;null!==n&&void 0!==n&&n.id&&null!==t&&void 0!==t&&t.id&&n.id!==t.id&&(c((e=>{const o=e.indexOf(n.id),i=e.indexOf(t.id);return(0,S.be)(e,o,i)})),d(!0))}),[S.be,c,d]);return(0,o.useEffect)((()=>{c(n)}),[n,c]),(0,o.useEffect)((()=>{l&&(i(A),d(!1))}),[A,l,d,i]),(0,m.jsx)(R.Mp,{sensors:u,collisionDetection:R.fp,onDragEnd:h,children:(0,m.jsx)(S.gB,{items:A,strategy:S._G,children:A.map((e=>(0,m.jsx)(oe,{id:e,color:null===t||void 0===t?void 
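/* Each sortable row (oe) receives the space's saved color, if any; the list is rendered inside dnd-kit's SortableContext for drag reordering. */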
0:t[e],onColorChange:a,onColorReset:s,onColorClear:r},e)))})})},ae=e=>{let{spaceIds:n,colors:t,onOrderChange:o,onColorChange:i,onColorReset:a,onColorClear:r}=e;return(0,m.jsx)(s.Flex,{column:!0,gap:2,children:(0,m.jsx)(ie,{spaceIds:n,colors:t,onOrderChange:o,onColorChange:i,onColorReset:a,onColorClear:r})})};function se(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const re="closeModal",Ae="deleteSettings",ce=()=>(0,m.jsxs)(s.Flex,{column:!0,gap:2,children:[(0,m.jsx)(s.Text,{children:"There are some unsaved changes that will be lost after closing the modal window."}),(0,m.jsx)(s.Text,{children:"Are you sure you want to close without saving your changes?"})]}),le=()=>(0,m.jsxs)(s.Flex,{column:!0,gap:2,children:[(0,m.jsx)(s.Text,{children:"You are about to delete all previously saved settings. This action cannot be reverted."}),(0,m.jsx)(s.Text,{children:"Are you sure you want to continue?"})]}),de={[re]:{title:"Close modal",confirmLabel:"Close without saving",message:(0,m.jsx)(ce,{})},[Ae]:{title:"Delete settings",confirmLabel:"Yes, delete settings",message:(0,m.jsx)(le,{})}},ue=e=>{let{flavour:n,handleConfirm:t,handleDecline:i}=e;const a=(0,o.useMemo)((()=>de[n]),[n]);return(0,m.jsx)(s.ConfirmationDialog,function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?se(Object(t),!0).forEach((function(n){(0,d.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):se(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({"data-testid":"spacesListDialog",handleConfirm:t,handleDecline:i},a))},he=(0,L.A)(s.Button),me=()=>{const e=(0,w.UV)("loaded"),{state:n,onOrderChange:t,onColorChange:i,onColorReset:a,onColorClear:r,onDelete:A,canDelete:c,loading:l}=k(),{spaceIds:d,colors:u}=n||{},h=l,[g,p]=(0,o.useState)(),f=(0,o.useCallback)((()=>p()),[p]),y=(0,o.useCallback)((()=>{p(Ae)}),[p]),b=(0,o.useCallback)((()=>{A(),f()}),[A,f]);return e?(0,m.jsxs)(s.Flex,{column:!0,gap:1,children:[(0,m.jsx)(ae,{spaceIds:d,colors:u,onOrderChange:t,onColorChange:i,onColorReset:a,onColorClear:r}),(0,m.jsx)(s.Flex,{children:(0,m.jsx)(he,{label:"Delete all settings",flavour:"borderless",disabled:h||!c,onClick:y,danger:!0,tooltip:h||!c?null:"Delete any previously saved settings."})}),g?(0,m.jsx)(ue,{flavour:g,handleConfirm:b,handleDecline:f}):null]}):(0,m.jsx)(s.Flex,{children:(0,m.jsx)(s.Text,{color:"textLite",children:"Loading spaces..."})})},ge=()=>{const[e,n]=(0,v.tF)("theme"),[t,i]=(0,v.tF)("chartsDesign"),a=(0,o.useCallback)((e=>n(e.target.value)),[]),r=(0,o.useCallback)((e=>i(e.target.value)),[]);return(0,m.jsxs)(s.Flex,{column:!0,gap:3,justifyContent:"between",children:[(0,m.jsxs)(s.Flex,{column:!0,gap:2,"data-onboarding-id":"appearance-theme",children:[(0,m.jsx)(s.Text,{strong:!0,children:"UI Theme"}),(0,m.jsx)(s.RadioButton,{label:"Light",checked:"light"===e,onChange:a,value:"light"}),(0,m.jsx)(s.RadioButton,{label:"Dark",checked:"dark"===e||!e||"unspecified"===e,onChange:a,value:"dark"})]}),(0,m.jsxs)(s.Flex,{column:!0,gap:2,"data-onboarding-id":"appearance-charts",children:[(0,m.jsx)(s.Text,{strong:!0,children:"Charts 
design"}),(0,m.jsx)(s.RadioButton,{label:"Default",checked:"default"===t||!t,onChange:r,value:"default"}),(0,m.jsx)(s.RadioButton,{label:"Minimal",checked:"minimal"===t,onChange:r,value:"minimal"})]}),(0,m.jsxs)(s.Flex,{column:!0,gap:2,children:[(0,m.jsxs)(s.Flex,{column:!0,gap:.5,children:[(0,m.jsx)(s.Text,{strong:!0,children:"Configure spaces list"}),(0,m.jsx)(s.Text,{color:"textLite",children:"Reorder spaces by dragging and customize their colors using the color picker."})]}),(0,m.jsx)(me,{})]})]})};var pe=t(42790);function fe(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function ye(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?fe(Object(t),!0).forEach((function(n){(0,d.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):fe(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const be=[{value:"browser",label:"Browser default"},{value:"en-US",label:"English (US)"},{value:"en-GB",label:"English (UK)"},{value:"de-DE",label:"Deutsch"},{value:"fr-FR",label:"Fran\xe7ais"},{value:"es-ES",label:"Espa\xf1ol"},{value:"pt-BR",label:"Portugu\xeas (Brasil)"},{value:"ja-JP",label:"\u65e5\u672c\u8a9e"},{value:"zh-CN",label:"\u4e2d\u6587 (\u7b80\u4f53)"},{value:"ko-KR",label:"\ud55c\uad6d\uc5b4"},{value:"ru-RU",label:"\u0420\u0443\u0441\u0441\u043a\u0438\u0439"},{value:"it-IT",label:"Italiano"},{value:"nl-NL",label:"Nederlands"},{value:"pl-PL",label:"Polski"},{value:"tr-TR",label:"T\xfcrk\xe7e"},{value:"el-GR",label:"\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac"}],Ee=()=>{const[e,n]=(0,v.tF)("dimensionsToShow"),[t,i]=(0,v.tF)("defaultForcePlay"),[a,r]=(0,v.tF)("locale"),A=(0,o.useCallback)((e=>n(e.target.value)),[]),c=(0,o.useMemo)((()=>be.find((e=>{let{value:n}=e;return n===(null!==a&&void 0!==a?a:"browser")}))),[a]),l=(0,o.useCallback)((e=>r(e.value)),[r]),d=(0,pe.rI)(),u=(0,o.useCallback)((e=>{i("true"===e.target.value),d((n=>ye(ye({},n),{},{forcePlay:"true"===e.target.value})))}),[]);return(0,m.jsxs)(s.Flex,{column:!0,gap:3,justifyContent:"between",children:[(0,m.jsxs)(s.Flex,{column:!0,gap:2,children:[(0,m.jsxs)(s.Flex,{column:!0,gap:.5,children:[(0,m.jsx)(s.Text,{strong:!0,children:"Which dimensions to show?"}),(0,m.jsx)(s.TextSmall,{color:"textDescription",children:"When set to Non Zero, dimensions that have all their values (within the current view) set to zero will not be transferred from the netdata server (except if all dimensions of the chart are zero, in which case this setting does nothing - all dimensions are transferred and shown). When set to All, all dimensions will always be shown. 
Set it to Non Zero to lower the data transferred between netdata and your browser, lower the CPU requirements of your browser (fewer lines to draw) and increase the focus on the legends (fewer entries in the legends)."})]}),(0,m.jsx)(s.RadioButton,{label:"Non Zero",checked:"nonZero"===e||!e,onChange:A,value:"nonZero"}),(0,m.jsx)(s.RadioButton,{label:"All",checked:"all"===e,onChange:A,value:"all"})]}),(0,m.jsxs)(s.Flex,{column:!0,gap:2,"data-onboarding-id":"preferences-refresh",children:[(0,m.jsxs)(s.Flex,{column:!0,gap:.5,children:[(0,m.jsx)(s.Text,{strong:!0,children:"Default refresh mode"}),(0,m.jsx)(s.TextSmall,{color:"textDescription",children:"Choose your preferred default refresh mode. Standard Play pauses automatic refreshes when the browser tab loses focus to save system resources. Force Play keeps refreshing continuously, even when the tab is not in focus. This is useful for monitoring dashboards on secondary screens."})]}),(0,m.jsx)(s.RadioButton,{label:"Standard Play",checked:!t,onChange:u,value:"false"}),(0,m.jsx)(s.RadioButton,{label:"Force Play",checked:t,onChange:u,value:"true"})]}),(0,m.jsxs)(s.Flex,{column:!0,gap:2,"data-onboarding-id":"preferences-locale",children:[(0,m.jsxs)(s.Flex,{column:!0,gap:.5,children:[(0,m.jsx)(s.Text,{strong:!0,children:"Number and date format"}),(0,m.jsx)(s.TextSmall,{color:"textDescription",children:"Choose how numbers, dates, and times are formatted throughout the application."})]}),(0,m.jsx)(s.Select,{value:c,options:be,onChange:l,menuPlacement:"auto",placeholder:"Select locale..."})]})]})};var we=t(64587),Be=t(41514);t(89463),t(42762);const Ce=(0,t(94404).A)(s.Button),Me=["scope:all","scope:agent-ui","scope:grafana-plugin","scope:mcp"],Te=e=>{let{onCloseModal:n,onCreate:t}=e;const[i,a]=(0,o.useState)(""),[r,A]=(0,o.useState)("scope:all");return(0,m.jsxs)(s.ModalContent,{children:[(0,m.jsxs)(s.ModalHeader,{children:["Create New Token",(0,m.jsx)(s.ModalCloseButton,{testId:"close-button",onClose:n})]}),(0,m.jsxs)(s.ModalBody,{gap:2,width:80,children:[(0,m.jsx)(s.TextInput,{"data-testid":"description",placeholder:"Enter Description",onChange:e=>{let{target:n}=e;return a(n.value)},value:i,size:"small"}),(0,m.jsx)(s.Flex,{column:!0,gap:2,"data-testid":"scopes",children:Me.map((e=>(0,m.jsx)(s.RadioButton,{checked:r===e,onChange:e=>A(e.target.value),value:e,alignItems:"start",children:(0,m.jsx)(s.TextSmall,{children:e})},e)))})]}),(0,m.jsx)(s.ModalFooter,{children:(0,m.jsx)(Ce,{disabled:""===i.trim(),label:"Create",onClick:()=>t({description:i,scope:r}),"data-testid":"btn-create",payload:{description:"Modal - Create Token"}})})]})};var Ie=t(12273),ve=t(81214),_e=t(61409),Qe=t(12724);function De(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function xe(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?De(Object(t),!0).forEach((function(n){(0,d.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):De(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const ke=function(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return()=>{(0,ve.A)(e);const t=(0,Qe.UI)(xe(xe({},xe({header:"Copied",text:"Token copied to your clipboard!",icon:"gear"},n)),{},{success:!0}));_e.oR.success(t,{context:"copy"})}};var Re=t(60908);const 
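/* Se: the "Token Generated" modal. It shows the new API token exactly once, with a copy-to-clipboard action and a warning that the token cannot be retrieved after the modal closes. */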
Se=e=>{let{onCloseModal:n,token:t}=e;return(0,m.jsx)(s.ModalContent,{children:(0,m.jsxs)(Re._0,{feature:"TokenCreated",children:[(0,m.jsxs)(s.ModalHeader,{children:["Token Generated",(0,m.jsx)(s.ModalCloseButton,{testId:"close-button",onClose:n})]}),(0,m.jsx)(s.ModalBody,{children:(0,m.jsxs)(s.Flex,{column:!0,gap:4,children:[(0,m.jsxs)(Ie.FU,{children:[(0,m.jsx)("span",{"data-testid":"token",children:t}),(0,m.jsx)(Ie.vE,{name:"copy",size:"small",color:"primary",onClick:ke(t),"data-ga":"profile::click-copytoken::all-pages::api-token"})]}),(0,m.jsxs)(s.Flex,{alignItems:"center",background:"warningBackground",gap:4,padding:[3,3],round:!0,children:[(0,m.jsx)(Ie.RV,{"data-testid":"noNodesView-warningBannerIcon",name:"exclamation",color:"warning"}),(0,m.jsx)(Ie.yB,{"data-testid":"warning-message",children:"Make sure to copy or store this token and add it where you need it. Once you close this modal you will not be able to retrieve it again."})]})]})}),(0,m.jsx)(s.ModalFooter,{children:(0,m.jsx)(s.Button,{label:"Close",onClick:n})})]})})};var Pe=t(67916);const Fe=e=>{let{onCloseModal:n,onTokenCreated:t,view:i,token:a}=e;const[r,A]=(0,o.useState)(i),[c,l]=(0,o.useState)(a);return(0,m.jsxs)(s.Modal,{onClickOutside:n,onEsc:n,children:["CreateView"===r&&(0,m.jsx)(Te,{"data-testid":"create-view",onCloseModal:n,onCreate:async e=>{let{description:n,scope:o}=e;const i=await(0,Pe.Ey)({description:n,scope:o});(0,Be.H)("api-token","create-token","profile",{description:n,scope:o}),l(i.data.token),A("CopyTokenView"),t()}}),"CopyTokenView"===r&&c&&(0,m.jsx)(Se,{onCloseModal:n,token:c})]})},Ye=()=>{const[e,n]=(0,o.useState)([]),[t,,i,a]=(0,l.A)(!0),s=async()=>{i();const e=await(0,Pe.li)();a(),n(e.data)};return(0,o.useEffect)((()=>{s()}),[]),{isLoading:t,data:e,setData:n,fetchData:s}};var Ue=t(3319);const Ne={right:["actions"]},je=()=>{const{localeDateString:e}=(0,we.$j)(),n=(0,o.useMemo)((()=>(e=>[{id:"description",header:"Description",fullWidth:!0,size:150,wrap:!0,cell:e=>{let{getValue:n}=e;return(0,m.jsx)(H.A,{plain:!0,content:n()||"",isBasic:!0,children:(0,m.jsx)(s.TextSmall,{truncate:!0,children:n()||""})})}},{id:"scopes",header:"Scope",fullWidth:!0,size:150,wrap:!0,cell:e=>{let{getValue:n}=e;return(0,m.jsx)(s.TextSmall,{children:(Array.isArray(n())?n():[]).join(", ")})}},{id:"created_at",header:"Created at",cell:n=>{let{getValue:t}=n;return(0,m.jsx)(s.TextSmall,{children:t()?e(new Date(t())):"-"})},sortingFn:"datetime"},{id:"last_used_at",header:"Used at",cell:n=>{let{getValue:t}=n;return(0,m.jsx)(s.TextSmall,{children:t()?e(new Date(t())):"-"})},sortingFn:"datetime"},{id:"ends_with",header:"Token",cell:e=>{let{getValue:n}=e;return(0,m.jsxs)(m.Fragment,{children:[(0,m.jsx)(s.TextSmall,{style:{verticalAlign:"sub"},children:"******"}),n()]})}}])(e)),[e]),[t,i]=(0,o.useState)(!1),[a,r]=(0,o.useState)(!1),[A,c]=(0,o.useState)(null),{data:l,setData:d,fetchData:u}=Ye(),{sendButtonClickedLog:h}=(0,Ue.A)(),g=(0,o.useMemo)((()=>({addEntry:{handleAction:()=>{i(!0),h({label:"Create new token"},!0)},tooltipText:"Create new token"}})),[h]),[,p]=(0,Q.A)(),f=(0,o.useMemo)((()=>({delete:{handleAction:e=>{let{id:n}=e;(0,Pe.ni)(n).then((()=>{d((e=>e.filter((e=>e.id!==n)))),(0,Be.H)("api-token","delete-token","profile"),h({label:"Delete token"},!0)})).catch(p)},confirmationTitle:"Delete API Token",confirmationMessage:"You are about to delete this API token. Are you sure you want to 
continue?"}})),[l]);return(0,m.jsxs)(m.Fragment,{children:[(0,m.jsx)(s.Table,{dataColumns:n,data:l,bulkActions:g,rowActions:f,enableResizing:!0,enableSorting:!0,columnPinning:Ne,enableColumnPinning:!0}),t&&(0,m.jsx)(Fe,{view:"CreateView",onCloseModal:()=>i(!1),onTokenCreated:u}),a&&(0,m.jsx)(Fe,{view:"CopyTokenView",onCloseModal:()=>{r(!1),c(null)},token:A})]})};var ze=t(48301),He=t(78152),Oe=t(63950),Le=t.n(Oe);const Ge=["disabledInfo"],Je=["title","description","label","checked","onChange"];function qe(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Ke(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?qe(Object(t),!0).forEach((function(n){(0,d.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):qe(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Ve=(Xe=s.Toggle,e=>{let{disabledInfo:n="This setting is disabled"}=e,t=(0,u.A)(e,Ge);const{disabled:o}=t||{};return o?(0,m.jsx)(H.A,{content:n,children:(0,m.jsx)(s.Box,{children:(0,m.jsx)(Xe,Ke({},t))})}):(0,m.jsx)(Xe,Ke({},t))});var Xe;const We=e=>{let{title:n="",description:t="",label:o="",checked:i,onChange:a=Le()}=e,r=(0,u.A)(e,Je);return(0,m.jsxs)(s.Flex,{justifyContent:"between",alignItems:"center",children:[(0,m.jsxs)(s.Flex,{gap:.5,column:!0,children:[(0,m.jsx)(s.Text,{strong:!0,children:n}),(0,m.jsx)(s.TextSmall,{color:"textLite",children:t})]}),(0,m.jsx)(Ve,Ke({colored:!0,onChange:a,checked:i,"data-testid":"profileNotifications-".concat(o,"NotificationsToggle"),"data-ga":"profile-ntab::click-toggle::global-view::".concat(i?"enabled":"disabled")},r))]})};var Ze=t(194);const $e=()=>{const{isLoading:e,data:n}=Ye(),[t,i]=(0,o.useState)(),[a,s]=(0,o.useState)(),[,r]=(0,Q.A)();return(0,o.useEffect)((()=>{e||(async()=>{(null===n||void 0===n?void 0:n.length)>0&&await Promise.all(n.map((e=>{let{id:n}=e;return(0,Pe.ni)(n)}))).catch(r),(0,Pe.Ey)({description:"Mobile App",scope:"scope:mobile-app"}).then((e=>{let{data:n}=e;null!==n&&void 0!==n&&n.token&&i(n.token)})).catch((e=>{r({header:"Error",text:"Something went wrong"}),s(!0)}))})()}),[e]),{token:t,error:a}},en=e=>{let{value:n}=e;return(0,m.jsx)(s.Box,{width:"232px",height:"232px",background:"white",padding:[4],round:3,children:(0,m.jsx)(Ze.Ay,{size:200,value:n})})},nn=e=>{let{onClose:n=Le()}=e;const{token:t,error:o}=$e();return(0,m.jsx)(s.Modal,{children:(0,m.jsxs)(s.ModalContent,{width:{min:100,base:140},children:[(0,m.jsxs)(s.ModalHeader,{children:[(0,m.jsx)(s.Text,{children:"Scan QR Code"}),(0,m.jsx)(s.ModalCloseButton,{onClose:n,testId:"close-button"})]}),(0,m.jsx)(s.ModalBody,{children:(0,m.jsx)(s.Flex,{alignItems:"center",justifyContent:"center",height:100,children:t?(0,m.jsx)(en,{value:t}):(0,m.jsx)(s.Flex,{width:"100%",height:"100%",alignItems:"center",justifyContent:"center",padding:[4],children:o?(0,m.jsx)(s.TextBigger,{children:"An error occurred"}):(0,m.jsx)(s.H3,{children:"Generating token"})})})}),(0,m.jsx)(s.ModalFooter,{children:(0,m.jsx)(s.Flex,{justifyContent:"end",padding:[1,2],children:(0,m.jsx)(s.Button,{label:"Done",onClick:n})})})]})})},tn=()=>{const[e,n]=(0,ze.j$)("email"),t=(0,o.useCallback)((()=>n(!e)),[e,n]),[i,a]=(0,ze.j$)("mobileApp"),r=(0,o.useCallback)((()=>{a(!(null!==i&&void 
0!==i&&i.enabled))}),[i,a]),[A,,c,d]=(0,l.A)();return(0,m.jsxs)(s.Flex,{column:!0,gap:2,children:[(0,m.jsx)(We,{title:"E-mail",description:"Netdata will send you health notifications via e-mail",label:"email",checked:e,onChange:t}),He.Mh&&(0,m.jsxs)(s.Flex,{column:!0,children:[(0,m.jsx)(We,{title:"Mobile App Notifications",description:"Netdata will send you health notifications via mobile app notifications",label:"mobileApp",checked:null===i||void 0===i?void 0:i.enabled,onChange:r,disabled:!(null!==i&&void 0!==i&&i.linked),disabledInfo:"You have to link a device first and then enable notifications"}),(0,m.jsx)(s.Flex,{justifyContent:"end",children:(0,m.jsx)(s.Button,{flavour:"borderless",icon:"qrCode",iconColor:"primary",iconSize:"small",onClick:c,children:(0,m.jsx)(s.Text,{color:"primary",children:"Show QR code"})})}),A&&(0,m.jsx)(nn,{onClose:d})]})]})};var on=t(62718),an=t(99728),sn=(t(72577),t(55093)),rn=t(76571),An=t(84060),cn=t(49916),ln=t(84049),dn=t(19186),un=t(71819);const hn=F.default.div.withConfig({displayName:"styled__Row",componentId:"sc-lpc291-0"})(["width:100%;height:auto;display:flex;flex-flow:row nowrap;align-items:center;padding-left:",";padding-top:",";padding-bottom:",";margin-bottom:",";&:nth-child(odd){background-color:",";}"],(0,s.getSizeBy)(3),(0,s.getSizeBy)(1),(0,s.getSizeBy)(1),(0,s.getSizeBy)(2),(0,s.getColor)("modalTabsBackground")),mn=(0,F.default)(s.Icon).withConfig({displayName:"styled__StyledIcon",componentId:"sc-lpc291-1"})(["width:20px;height:20px;margin-right:",";"],(0,s.getSizeBy)()),gn=(0,F.css)(["&&{width:250px;}margin-left:auto;"]),pn=(0,F.default)(s.Select).withConfig({displayName:"styled__StyledSelect",componentId:"sc-lpc291-2"})(["",""],gn),fn=(0,F.default)(s.Button).withConfig({displayName:"styled__SettingsLoader",componentId:"sc-lpc291-3"})([""," color:",";border:1px solid ",";.path{stroke:",";}"],gn,(0,s.getColor)("text"),(0,s.getColor)("border"),(0,s.getColor)("text")),yn=e=>{let{roomId:n,spaceId:t}=e;const i=(0,un.A)(),[a,s]=(0,ze.yP)({roomId:n,spaceId:t,key:"notification_options"}),[r,A]=(0,o.useState)(),c=(0,o.useMemo)((()=>a?i.filter((e=>a.includes(e.value))):[]),[i,a]),l=(0,o.useCallback)((e=>{A(e)}),[A]),d=(0,o.useCallback)((()=>{r&&Array.isArray(r)&&s(r.map((e=>{let{value:n}=e;return n})))}),[r,s]);return(0,m.jsx)(pn,{options:i,value:r||c,onChange:l,onMenuClose:d,isMulti:!0,closeMenuOnSelect:!1,menuPlacement:"auto"})},bn=e=>{let{roomId:n,spaceId:t}=e;const[i,a]=(0,o.useState)(),r=(0,ln.th)(t,n,{onFail:()=>{a(!1)},onSuccess:()=>{a(!1)}}),A=(0,o.useCallback)((()=>{a(!0),r()}),[r,n]);return(0,m.jsx)(s.Box,{margin:[0,2,0,"auto"],children:(0,m.jsx)(H.A,{content:"Join this room to activate notifications for it",isBasic:!0,children:(0,m.jsx)(s.Button,{isLoading:i,label:"Join",onClick:A,flavour:"hollow"})})})},En=e=>{let{isMember:n,roomId:t,spaceId:i}=e;const a=(0,dn.wz)(t,"name");return(0,m.jsxs)(hn,{"data-testid":"roomSettings-room-".concat(a),"data-ga":"profile-ntab::click-join-room-".concat(a,"::global-view"),children:[(0,m.jsx)(mn,{name:"room",color:"text"}),(0,m.jsx)(s.Text,{children:a}),n?(0,m.jsx)(o.Suspense,{fallback:(0,m.jsx)(fn,{isLoading:!0,flavour:"hollow",label:"Loading settings.."}),children:(0,m.jsx)(yn,{roomId:t,spaceId:i})}):(0,m.jsx)(bn,{roomId:t,spaceId:i})]})},wn=F.default.div.withConfig({displayName:"styled__CollapsibleRoot",componentId:"sc-1p8t8zr-0"})(["width:100%;height:",";display:flex;flex-flow:row 
nowrap;align-items:center;cursor:pointer;margin-bottom:",";"],(0,s.getSizeBy)(5),(0,s.getSizeBy)(2)),Bn=(0,F.default)(s.Icon).withConfig({displayName:"styled__OpenerIcon",componentId:"sc-1p8t8zr-1"})(["height:5px;width:6px;margin-right:",";",";"],(0,s.getSizeBy)(2),(e=>{let{expanded:n}=e;return n&&"transform: rotate(90deg)"})),Cn=(0,F.default)(s.Icon).withConfig({displayName:"styled__SpaceIcon",componentId:"sc-1p8t8zr-2"})(["width:20px;height:20px;margin-right:",";"],(0,s.getSizeBy)()),Mn=(0,F.default)(s.Text).withConfig({displayName:"styled__SpaceLabel",componentId:"sc-1p8t8zr-3"})(["font-weight:bold;"]),Tn=(0,F.default)(s.TextSmall).withConfig({displayName:"styled__SettingsInfo",componentId:"sc-1p8t8zr-4"})(["margin-left:auto;opacity:0.8;"]);var In=t(41258);const vn=e=>{let{expanded:n,onExpand:t,label:o,spaceId:i}=e;(0,In.A)(i);const a=(0,rn.t5)(i,"loaded"),s=(0,rn.t5)(i,"channels").find((e=>"Email"===e.integration.slug))||{};return(0,m.jsxs)(wn,{onClick:t,"data-testid":"spaceRoot-space-".concat(o),children:[(0,m.jsx)(Bn,{name:"chevron_right",expanded:n,color:"text"}),(0,m.jsx)(Cn,{name:"space",color:"text"}),(0,m.jsx)(Mn,{children:o}),a&&!s.enabled&&(0,m.jsx)(Tn,{children:"E-mail notifications for this space have been disabled by the admin"})]})},_n={width:"108px",height:"77px"},Qn=e=>{let{spaceId:n,isCurrent:t,showAllRooms:i}=e;const[a,r]=(0,l.A)(t),A=(0,An.A)(n,{autorun:!1,polling:!1}),c=(0,cn.sC)(n,"loaded");(0,o.useEffect)((()=>{n&&a&&!c&&A()}),[a,c,n,A]);const d=(0,w.ns)(n,"name"),u=(0,cn.CB)(n),h=(0,o.useMemo)((()=>i?u:u.filter((e=>{let{isMember:n}=e;return n}))),[u,i]),g=(0,o.useRef)();return(0,o.useEffect)((()=>{a&&g.current&&g.current.scrollIntoView({behavior:"smooth",block:"start"})}),[a]),(0,m.jsxs)(s.Flex,{column:!0,ref:g,children:[(0,m.jsx)(vn,{expanded:a,onExpand:r,label:d,spaceId:n}),(0,m.jsx)(s.Collapsible,{gap:0,open:a,children:()=>c?(0,m.jsx)(m.Fragment,{children:h.map((e=>{let{id:t,isMember:o}=e;return(0,m.jsx)(En,{isMember:o,roomId:t,spaceId:n},t)}))}):(0,m.jsx)(sn.A,{iconProps:_n,title:"Loading..."})})]})};var Dn=t(64602);const xn=["onFilterClick","showAllRooms"];function kn(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Rn(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?kn(Object(t),!0).forEach((function(n){(0,d.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):kn(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Sn=e=>{let{onFilterClick:n,showAllRooms:t}=e,o=(0,u.A)(e,xn);return(0,m.jsxs)(s.Flex,Rn(Rn({gap:2,padding:[1,2],"data-testid":"roomFilterPills"},o),{},{children:[(0,m.jsx)(Dn.O,{flavour:t?"hollow":"default",onClick:n(!1),label:"My Rooms","data-ga":"roomFilterPills::click-my::global-view","data-testid":"roomFilterPills-showMy"}),(0,m.jsx)(Dn.O,{flavour:t?"default":"hollow",onClick:n(!0),label:"All Rooms","data-ga":"roomFilterPills::click-show-all::global-view","data-testid":"roomFilterPills-showAll"})]}))},Pn=()=>{const e=(0,w.vt)(),n=(e=>{const n=(0,w.UV)("ids");return(0,o.useMemo)((()=>n.reduce(((n,t)=>t===e?[t,...n]:[...n,t]),[])),[e,n])})(e),t=(0,an.JT)("room:ReadAll"),[i,a]=(0,o.useState)(!1);return(0,m.jsxs)(m.Fragment,{children:[(0,m.jsx)(s.TextBig,{strong:!0,children:"Notifications for all your Netdata Spaces and all the 
Rooms you are in"}),t&&(0,m.jsx)(Sn,{onFilterClick:e=>n=>{n.stopPropagation(),a(e)},showAllRooms:i,padding:[1,0]}),(0,m.jsx)(s.Flex,{overflow:{vertical:"auto"},column:!0,"data-testid":"spaceRoomNotifications-spacesContainer",padding:[3,3,3,0],children:(0,m.jsx)(o.Suspense,{fallback:(0,m.jsx)(on.aW,{}),children:n.map((n=>(0,m.jsx)(Qn,{isCurrent:n===e,showAllRooms:i,spaceId:n},n)))})})]})},Fn=(0,o.memo)((()=>(0,m.jsxs)(s.Flex,{overflow:{vertical:"hidden"},column:!0,gap:4,children:[(0,m.jsx)(s.TextBig,{strong:!0,children:"Notification Methods"}),(0,m.jsx)(tn,{}),(0,m.jsx)(s.Flex,{height:"1px",background:"border"}),(0,m.jsx)(Pn,{})]}))),Yn={offline:["theme","preferences"],online:["profile","theme","preferences","notifications","apiTokens"]},Un={profile:{Component:I,label:(0,m.jsx)(s.Text,{children:"Profile"}),order:0,testId:"profileTab"},theme:{Component:ge,label:(0,m.jsx)(s.Text,{children:"Appearance"}),order:1,testId:"themeTab"},preferences:{Component:Ee,label:(0,m.jsx)(s.Text,{children:"Preferences"}),order:2,testId:"preferencesTab"},notifications:{Component:Fn,label:(0,m.jsx)(s.Text,{children:"Notifications"}),order:3,testId:"notificationsTab"},apiTokens:{Component:je,label:(0,m.jsx)(s.Text,{children:"API tokens"}),order:4,testId:"apiTokensTab"}},Nn=e=>(Un[e]||Un.profile).order,jn=function(){let{modalName:e="profile"}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const n=(0,a.uW)("isAnonymous"),t=(0,o.useMemo)((()=>Object.keys(Un).filter((e=>Yn[n?"offline":"online"].includes(e)))),[n]),{isModalOpen:s,currentModalTab:r,handleOpenModal:A,handleCloseModal:c,handleChangeModalTab:l}=(0,i.A)(e),[d,u]=(0,o.useState)(Nn(r)),h=e=>{l(e)};return(0,o.useEffect)((()=>{u(Nn(r))}),[r]),{handleOpenProfileModal:function(){A(arguments.length>0&&void 0!==arguments[0]?arguments[0]:"profile")},handleCloseProfileModal:()=>{c()},getPreselectedTab:Nn,setCurrentTab:h,handleChangeTab:e=>{const n=t[e];u(e),h(n)},tabs:t,tabsByName:Un,activeTabIndex:d,isProfileModalOpen:s}}},67916(e,n,t){"use strict";t.d(n,{Ey:()=>i,li:()=>a,ni:()=>s});t(89463);var o=t(91130);const i=e=>{let{description:n,scope:t}=e;return o.A.post("/api/v1/auth/account/api-token",{description:n,scopes:[t]})},a=()=>o.A.get("/api/v1/auth/account/api-token"),s=e=>o.A.delete("/api/v1/auth/account/api-token/".concat(e))},12273(e,n,t){"use strict";t.d(n,{FU:()=>c,RV:()=>l,vE:()=>u,yB:()=>d});t(98992),t(54520),t(3949);var o=t(64467),i=t(51510),a=t(42358),s=t(74848);function r(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function A(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?r(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):r(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const c=i.default.div.withConfig({displayName:"styled__TokenContainer",componentId:"sc-s1axew-0"})(["display:flex;flex-direction:column;align-items:center;color:",";background:",";border:1px solid ",";border-radius:2px;overflow-wrap:anywhere;white-space:pre-wrap;padding:23px 27px 
14px;width:100%;font-family:monospace;letter-spacing:0.09px;line-height:18px;font-size:14px;word-break:break-word;"],(0,a.getColor)("textDescription"),(0,a.getColor)("modalTabsBackground"),(0,a.getColor)("borderSecondary")),l=e=>(0,s.jsx)(a.Box,A(A({},e),{},{as:a.Icon,sx:{borderRadius:"50%",overflow:"hidden",background:(0,a.getColor)(["neutral","white"])}})),d=e=>(0,s.jsx)(a.Text,A(A({},e),{},{color:["neutral","grey35"]})),u=e=>(0,s.jsx)(a.Flex,A(A({},e),{},{as:a.Icon,sx:{alignSelf:"flex-end",cursor:"pointer"}}))},61533(e,n,t){"use strict";t.d(n,{A:()=>je});var o=t(64467),i=t(80045),a=(t(33110),t(98992),t(54520),t(72577),t(3949),t(81454),t(8872),t(62953),t(96540)),s=t(42358),r=t(65687),A=t(74848);const c=["value"];function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const d=e=>{let{value:n}=e,t=(0,i.A)(e,c);return(0,A.jsx)(r.Ay,function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?l(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):l(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({value:n.value},t))};t(89463);var u=t(45087);const h=["description","maxValue","strong","type","value","valueOptions","wrap"],m=e=>{let{description:n="",maxValue:t,strong:o,type:a,value:c,valueOptions:l={},wrap:d}=e,m=(0,i.A)(e,h);const g=100*c/t,p="bar"===a,f=(null===m||void 0===m?void 0:m["data-testid"])||"progressValue",y=(0,r.k4)(c,l),b=y.value,E=y.units;return(0,A.jsx)(u.A,{content:(0,A.jsx)(r.$f,{description:n,value:c,units:l.units}),isBasic:!0,stretch:"align",children:(0,A.jsxs)(s.Flex,{column:!0,"data-testid":f,gap:1,flexWrap:d,flex:!0,children:[!p&&(0,A.jsxs)(r.WT,{strong:o,testid:f,justifyContent:"end",children:[b," ",(0,A.jsx)(s.TextSmall,{children:E})]}),null!==c&&(0,A.jsx)(s.ProgressBar,{background:"border",border:"none",color:["green","netdata"],containerWidth:"100%","data-testid":"".concat(f,"-bar"),height:2,width:"".concat(g,"%")})]})})},g=["value","wrap"];function p(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function f(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?p(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):p(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const y=e=>{let{value:n,wrap:t}=e,o=(0,i.A)(e,g);return n=Array.isArray(n)?n:[n],(0,A.jsx)(s.Flex,{alignItems:"center",justifyContent:"center",flexWrap:t,children:n.map(((e,n)=>(0,A.jsx)(s.Pill,{flavour:"neutral","data-testid":"pillValueComponent",margin:[.3],normal:!1,children:(0,A.jsx)(r.Ay,f(f({value:e},o),{},{wrap:t}))},"".concat(e,"-").concat(n))))})};var b=t(24609),E=t(19186),w=t(51510),B=t(75894);const C=["color"];function M(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function T(e){for(var n=1;n<arguments.length;n++){var 
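/* Another inlined object-spread helper. The components that follow (I: "by <user>" attribution, v: strong text, _: status pill) format entries in the events feed. */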
t=null!=arguments[n]?arguments[n]:{};n%2?M(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):M(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const I=e=>{let{hide:n,user:t}=e;return n||!t?null:(0,A.jsxs)(A.Fragment,{children:[(0,A.jsx)(s.Text,{children:"by"}),(0,A.jsx)(v,{children:t})]})},v=e=>{let{color:n="text"}=e,t=(0,i.A)(e,C);return(0,A.jsx)(s.Text,T({color:n,strong:!0},t))},_=e=>{let{type:n,text:t=n,hollow:o}=e;return(0,A.jsx)(B.A,T(T({flavour:n.toLowerCase()},o&&{border:!1}),{},{children:t}))},Q=(0,w.default)(s.Flex).attrs({gap:1,flexWrap:!0,alignItems:"center"}).withConfig({displayName:"components__Container",componentId:"sc-b5rk6g-0"})([""]);var D=t(41344),x=t(24013),k=t(79748);const R=["chart","hosts","alert"];function S(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function P(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?S(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):S(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const F={CLEAR:0,WARNING:1,CRITICAL:2,UNINITIALIZED:3,UNDEFINED:4},Y=e=>{let{chart:n,context:t,nodeId:o,nodeName:i,spaceSlug:r,roomSlug:c,alert:l}=e;const d=(0,D.Zp)(),h=(0,x.Zl)(o),m=(0,a.useCallback)((()=>{var e;null!==l&&void 0!==l&&l.id?d(h,{state:{alertId:l.id}}):d(h,{state:{contextToGo:null===t||void 0===t||null===(e=t.name)||void 0===e?void 0:e[0]}})}),[h,l]),g="/spaces/".concat(r,"/rooms/").concat(c,"/alerts/").concat(null===l||void 0===l?void 0:l.id),{current:p,name:f,previous:y}=l;if("ERROR"===p.status)return(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"For the alert"}),(0,A.jsx)(k.A,{Component:v,as:D.N_,to:g,children:f}),(0,A.jsx)(s.Text,{children:"for"}),(0,A.jsx)(k.A,{Component:v,onClick:m,children:n.name}),(0,A.jsx)(s.Text,{children:"on"}),(0,A.jsx)(k.A,{Component:v,onClick:m,children:i}),(0,A.jsx)(s.Text,{children:"we couldn't calculate the current value"}),(0,A.jsx)(u.A,{align:"bottom",content:"Please check your alert configuration",children:(0,A.jsx)(s.Icon,{color:"nodeBadgeColor",size:"small",name:"information"})})]});if("REMOVED"===p.status)return(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"Alert"}),(0,A.jsx)(k.A,{Component:v,as:D.N_,to:g,children:f}),(0,A.jsx)(s.Text,{children:"for"}),(0,A.jsx)(k.A,{Component:v,onClick:m,children:n.name}),(0,A.jsx)(s.Text,{children:"on"}),(0,A.jsx)(k.A,{Component:v,onClick:m,children:i}),(0,A.jsx)(s.Text,{children:"is no longer available, state can't be assessed"})]});const b=F[p.status]===F.CLEAR;return(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"Alert"}),(0,A.jsx)(k.A,{Component:v,as:D.N_,to:g,children:f}),(0,A.jsx)(s.Text,{children:"for"}),(0,A.jsx)(k.A,{Component:v,onClick:m,children:n.name}),(0,A.jsx)(s.Text,{children:"on"}),(0,A.jsx)(k.A,{Component:v,onClick:m,children:i}),b?(0,A.jsx)(s.Text,{children:"recovered"}):(0,A.jsxs)(A.Fragment,{children:[(0,A.jsx)(s.Text,{strong:!0,children:F[y.status]===F.CRITICAL?"was demoted":F[y.status]===F.WARNING?"escalated":F[p.status]===F.UNDEFINED?"transitioned":"was 
raised"}),(0,A.jsx)(s.Text,{children:"to"}),(0,A.jsx)(_,{type:p.status}),F[p.status]===F.UNDEFINED&&(0,A.jsx)(s.Text,{children:"state"})]}),(0,A.jsx)(s.Text,{children:"with value"}),(0,A.jsx)(_,{type:p.status,text:p.value_string,hollow:!0,padding:[0]})]})};function U(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function N(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?U(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):U(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const j=e=>{let{id:n,name:t}=e;const o=(0,b.bq)(),i=(0,E.wz)(n,"slug"),a="/spaces/".concat(o,"/rooms/").concat(i,"/home");return(0,A.jsx)(k.A,{Component:v,as:D.N_,to:a,children:t})},z=e=>{let{rooms:n}=e;return n.map(((e,t)=>(0,A.jsxs)(a.Fragment,{children:[t>0&&(t<n.length-1?", ":" and "),(0,A.jsx)(j,N({},e))]},e.id)))},H=e=>{let{rooms:n}=e;if(!n.length)return null;const t=n.length>1?"rooms":"room";return(0,A.jsxs)(A.Fragment,{children:[(0,A.jsxs)(s.Text,{children:["on ",t]}),(0,A.jsx)(z,{rooms:n})]})},O=e=>{let{contexts:n}=e;n.map(((e,t)=>(0,A.jsxs)(a.Fragment,{children:[t>0&&(t<n.length-1?", ":" and "),(0,A.jsx)(v,{children:e})]},e)))},L=e=>{let{contexts:n}=e;if(!n.length)return null;const t=n.length>1?"contexts":"context";return(0,A.jsxs)(s.Text,{children:["on ",t," ",(0,A.jsx)(O,{contexts:n})]})};var G=t(64587);const J={month:"2-digit",day:"2-digit",year:"numeric",hour:"numeric",minute:"numeric",long:!1,dateStyle:void 0},q=e=>{let{start:n,end:t}=e;const{localeDateString:o}=(0,G.$j)();if(!n||!t)return null;const i=o(new Date(n),J),a=o(new Date(t),J);return(0,A.jsx)(A.Fragment,{children:(0,A.jsxs)(s.Text,{children:["(scheduled ",i," - ",a,")"]})})};function K(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function V(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?K(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):K(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const X={"silencing-rule-created":"created","silencing-rule-deleted":"deleted","silencing-rule-changed":"changed"},W=e=>{let{rooms:n=[],contexts:t=[]}=e;return n.length||t.length?(0,A.jsxs)(A.Fragment,{children:[!!n.length&&(0,A.jsx)(H,{rooms:n}),!!n.length&&!!t.length&&(0,A.jsxs)(s.Text,{children:["and ",(0,A.jsx)(L,{contexts:t})]})]}):null},Z=e=>{var n;let{action:t,notification:o,user:i,room:a,context:r}=e;const c=((null===o||void 0===o||null===(n=o.silencing)||void 0===n?void 0:n.rule)||[])[0],l=null===i||void 0===i?void 0:i.name,d=(null===r||void 0===r?void 0:r.name)||[];return(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"Silencing rule"}),(0,A.jsx)(v,{children:c.name}),(0,A.jsx)(W,{rooms:a,contexts:d}),(0,A.jsxs)(s.Text,{children:["was ",X[t]]}),(0,A.jsx)(I,{user:l}),(0,A.jsx)(q,V({},c))]})};var $=t(16866);const ee=(e,n)=>{var t,o;return(null===(t=e.target)||void 0===t?void 0:t[n])||(null===(o=e.target)||void 0===o?void 
0:o.id)||e[n]||e.id},ne=(e,n)=>{if(e)return n=n||"name",Array.isArray(e)?e[0][n]:e[n]},te={"space-setting-created":()=>(0,A.jsxs)(s.Text,{children:["was ",(0,A.jsx)(s.Text,{strong:!0,children:"created"})]}),"space-setting-changed":()=>(0,A.jsxs)(s.Text,{children:["was ",(0,A.jsx)(s.Text,{strong:!0,children:"updated"})]}),"space-setting-removed":()=>(0,A.jsxs)(s.Text,{children:["was ",(0,A.jsx)(s.Text,{strong:!0,children:"deleted"})]})},oe=function(){let{event:e={},user:n={},token:t={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return"space-claiming-token-created"==e.action?(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"Claiming Token was created by user"}),(0,A.jsx)(I,{hide:!n.target,user:n.name})]}):"space-claiming-token-revoked"==e.action?(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"Claiming Token"}),(0,A.jsx)(s.Text,{strong:!0,children:t.prefix}),(0,A.jsx)(s.Text,{children:"was revoked by user"}),(0,A.jsx)(I,{hide:!n.target,user:n.name})]}):null},ie=e=>{let{action:n,Netdata:t}=e;const o=te[n];return(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"Setting on"}),(0,A.jsx)(v,{children:ne(t.settings,"type")}),o?(0,A.jsx)(o,{}):n,(0,A.jsxs)(s.Text,{children:["on ",ne(t.space)]})]})},ae=e=>{let{statistics:n}=e;const{live:t,stale:o,removed:i,total:a}=(null===n||void 0===n?void 0:n.nodes)||{};return(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"Space statistics. Nodes:"}),(0,A.jsxs)(s.Text,{color:$.J4.live.statusTextColor,children:[t," live"]}),(0,A.jsx)(s.Text,{children:","}),(0,A.jsxs)(s.Text,{color:$.J4.stale.statusTextColor,children:[o," stale"]}),(0,A.jsx)(s.Text,{children:","}),(0,A.jsxs)(s.Text,{color:$.J4.offline.statusTextColor,children:[i," removed"]}),(0,A.jsx)(s.Text,{children:","}),(0,A.jsxs)(s.Text,{strong:!0,children:[a," total"]})]})};t(34504),t(78898);var se=t(71341);const re=e=>{let{agentId:n,agentName:t}=e;return(0,A.jsxs)(A.Fragment,{children:[(0,A.jsx)(s.Text,{children:"Agent"}),t&&(0,A.jsx)(v,{children:t}),(0,A.jsxs)(v,{children:["(",n,")"]})]})},Ae=e=>{let{stateColor:n,state:t}=e;return(0,A.jsxs)(A.Fragment,{children:[(0,A.jsx)(s.Text,{children:"became"}),(0,A.jsx)(v,{color:n,children:t})]})},ce=e=>{let{stateColor:n,state:t}=e;return(0,A.jsxs)(A.Fragment,{children:[(0,A.jsx)(s.Text,{children:"was"}),(0,A.jsx)(v,{color:n,children:t})]})},le={"node-created":ce,"node-state-live":Ae,"node-state-stale":Ae,"node-state-offline":Ae,"node-removed":ce,"node-deleted":ce,"node-restored":ce,default:ce},de=e=>{var n;let{action:t,hosts:o=[]}=e;const{id:i,name:r}=o[0],c=(0,D.Zp)(),l=(0,x.Zl)(i),d=(0,a.useCallback)((()=>c(l)),[l]),u=le[t]||le.default,h=t.split("-").at(-1),m=null===(n=se.P[h])||void 0===n?void 0:n.statusTextColor;return(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"Node"}),(0,A.jsx)(k.A,{Component:v,onClick:d,children:r}),(0,A.jsx)(u,{stateColor:m,state:h})]})},ue=e=>{let{room:n}=e;const t=(0,E.XA)("untouchable");return(0,A.jsxs)(A.Fragment,{children:[(0,A.jsxs)(s.Text,{children:["was ",(0,A.jsx)(s.Text,{strong:!0,children:"added"})," to",t?" room":""]}),(0,A.jsx)(v,{children:t?ne(n):"this room"})]})},he=()=>(0,A.jsxs)(s.Text,{children:["was ",(0,A.jsx)(s.Text,{strong:!0,children:"created"})]}),me=()=>(0,A.jsxs)(s.Text,{children:["was ",(0,A.jsx)(s.Text,{strong:!0,children:"deleted"})]}),ge=e=>{let{room:n}=e;const t=(0,E.XA)("untouchable");return(0,A.jsxs)(A.Fragment,{children:[(0,A.jsxs)(s.Text,{children:["was ",(0,A.jsx)(s.Text,{strong:!0,children:"removed"})," from",t?" 
room":""]}),(0,A.jsx)(v,{children:t?ne(n):"this room"})]})},pe=e=>{let{room:n}=e;const t=(0,E.XA)("untouchable");return(0,A.jsxs)(A.Fragment,{children:[(0,A.jsx)(s.Text,{children:t?"room":""}),(0,A.jsx)(v,{children:t?ne(n):"this room"})]})},fe={"room-created":he,"room-deleted":me,"room-node-added":ue,"room-node-removed":ge,"room-user-added":ue,"room-user-removed":ge,"room-setting-created":he,"room-setting-changed":()=>(0,A.jsxs)(s.Text,{children:["was ",(0,A.jsx)(s.Text,{strong:!0,children:"updated"})]}),"room-setting-removed":me},ye=e=>{var n;let{action:t,hosts:o=[],Netdata:i,user:a}=e;const r=fe[t],c=null===(n=o[0])||void 0===n?void 0:n.name;return(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"Node"}),(0,A.jsx)(v,{children:c}),r?(0,A.jsx)(r,{room:i.room}):t,(0,A.jsx)(I,{user:null===a||void 0===a?void 0:a.name})]})},be=e=>{let{action:n,Netdata:t,user:o}=e;const i=fe[n];return(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"User"}),(0,A.jsx)(v,{children:ee(o,"name")}),i?(0,A.jsx)(i,{room:t.room}):n,(0,A.jsx)(I,{hide:!o.target,user:o.name})]})},Ee=e=>{let{action:n,Netdata:t}=e;const o=fe[n];return(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"Setting on"}),(0,A.jsx)(v,{children:ne(t.settings,"type")}),o?(0,A.jsx)(o,{}):n,(0,A.jsx)(s.Text,{children:"on"}),(0,A.jsx)(pe,{room:t.room})]})},we=e=>{let{action:n,Netdata:t,user:o}=e;const i=fe[n];return(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"Room"}),(0,A.jsx)(v,{children:ne(t.room)}),i?(0,A.jsx)(i,{}):n,(0,A.jsx)(I,{user:o.name})]})};function Be(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Ce(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Be(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Be(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Me={"alert-node-transition":e=>{let{chart:n,hosts:t,alert:o}=e,a=(0,i.A)(e,R);return t.map(((e,t)=>{let{id:i,name:s}=e;const r=P(P({},o),{},{name:o.name[t]},o.current?{current:P(P({},o.current),o.current.status?{status:o.current.status[t]||o.current.status[0]}:{})}:{});return(0,A.jsx)(Y,P({chart:n,nodeId:i,nodeName:s,alert:r},a),i)}))},"node-created":de,"node-state-live":de,"node-state-stale":de,"node-state-offline":de,"node-removed":de,"node-deleted":de,"node-restored":de,"agent-connected":e=>{let{agentId:n,agentName:t}=e;return(0,A.jsxs)(Q,{children:[(0,A.jsx)(re,{agentId:n,agentName:t}),(0,A.jsxs)(s.Text,{children:["has ",(0,A.jsx)(s.Text,{strong:!0,children:"connected"})," to Netdata"]})]})},"agent-connection-initialized":e=>{let{agentId:n,agentName:t}=e;return(0,A.jsxs)(Q,{children:[(0,A.jsx)(re,{agentId:n,agentName:t}),(0,A.jsxs)(s.Text,{children:["has ",(0,A.jsx)(s.Text,{strong:!0,children:"initialized"})," its connection to Netdata"]})]})},"agent-disconnected":e=>{let{agentId:n,agentName:t,reason:o}=e;return(0,A.jsxs)(Q,{children:[(0,A.jsx)(re,{agentId:n,agentName:t}),(0,A.jsxs)(s.Text,{children:["has ",(0,A.jsx)(s.Text,{strong:!0,children:"disconnected"})," from Netdata with reason: \u201c",o,"\u201d"]})]})},"agent-authenticated":e=>{let{agentId:n,agentName:t}=e;return(0,A.jsxs)(Q,{children:[(0,A.jsx)(re,{agentId:n,agentName:t}),(0,A.jsxs)(s.Text,{children:["has 
",(0,A.jsx)(s.Text,{strong:!0,children:"successfully"})," authenticated"]})]})},"agent-authentication-failed":e=>{let{agentId:n,agentName:t}=e;return(0,A.jsxs)(Q,{children:[(0,A.jsx)(re,{agentId:n,agentName:t}),(0,A.jsxs)(s.Text,{children:["has ",(0,A.jsx)(s.Text,{strong:!0,children:"failed"})," to authenticate"]})]})},"room-created":we,"room-deleted":we,"room-node-added":ye,"room-node-removed":ye,"room-user-added":be,"room-user-removed":be,"space-created":e=>{let{Netdata:n,user:t}=e;return(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"Space"}),(0,A.jsx)(v,{children:ne(n.space)}),(0,A.jsx)(s.Text,{children:"was created"}),(0,A.jsx)(I,{user:t.name})]})},"space-statistics":ae,"space-user-added":e=>{let{Netdata:n,user:t}=e;const{name:o}=(null===n||void 0===n?void 0:n.inviter)||{};return(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"User"}),(0,A.jsx)(v,{children:ee(t,"name")}),(0,A.jsx)(s.Text,{children:"was added to this space"}),o&&(0,A.jsxs)(A.Fragment,{children:[(0,A.jsx)(s.Text,{children:"by invite of"}),(0,A.jsx)(v,{children:o})]})]})},"space-user-changed":e=>{var n;let{user:t}=e;if(!(null===(n=t.changes)||void 0===n||!n.roles)&&1===Object.keys(t.changes).length){const e=t.changes.roles.length>1,n=t.changes.roles.join(", ");return(0,A.jsxs)(Q,{children:[(0,A.jsxs)(s.Text,{children:["User ",e?"roles":"role"," for"]}),(0,A.jsx)(v,{children:ee(t,"name")}),(0,A.jsxs)(s.Text,{children:[e?"were":"was"," changed to"]}),(0,A.jsx)(v,{children:n}),(0,A.jsx)(I,{hide:!t.target,user:t.name})]})}return(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"User"}),(0,A.jsx)(v,{children:ee(t,"name")}),(0,A.jsx)(s.Text,{children:"was modified"})]})},"space-user-invited":e=>{let{user:n}=e;return(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"User"}),(0,A.jsx)(v,{children:ee(n,"email")}),(0,A.jsx)(s.Text,{children:"was invited to this space"}),(0,A.jsx)(I,{user:n.name})]})},"space-user-removed":e=>{let{user:n}=e;const{name:t}=n.target||{};return(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"User"}),(0,A.jsx)(v,{children:ee(n,"name")}),(0,A.jsx)(s.Text,{children:"was removed from this space"}),t&&(0,A.jsx)(I,{user:n.name})]})},"space-user-uninvited":e=>{let{event:n,user:t}=e;return"space-invite-expired"===n.reason?(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"The invite for"}),(0,A.jsx)(v,{children:ee(t,"email")}),(0,A.jsx)(s.Text,{children:"to this space has expired"})]}):(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"User"}),(0,A.jsx)(v,{children:ee(t,"email")}),(0,A.jsx)(s.Text,{children:"was uninvited from this space"}),(0,A.jsx)(I,{hide:!t.target,user:t.name})]})},"space-claiming-token-created":oe,"space-claiming-token-revoked":oe,"silencing-rule-created":Z,"silencing-rule-deleted":Z,"silencing-rule-changed":Z,"space-settings-created":ie,"space-settings-removed":ie,"space-settings-changed":ie,"room-settings-created":Ee,"room-settings-removed":Ee,"room-settings-changed":Ee},Te=e=>{let{data:n}=e;const{action:t,roomId:o,space:i}=n.source,a=(0,b.ns)(i.id,"slug"),r=(0,E.wz)(o,"slug"),c=Me[t];return c?(0,A.jsx)(c,Ce(Ce({},n.source),{},{spaceSlug:a,roomSlug:r})):(0,A.jsxs)(Q,{children:[(0,A.jsx)(s.Text,{children:"Event"})," ",(0,A.jsx)(s.Text,{strong:!0,children:t})," ",(0,A.jsx)(s.Text,{children:"emitted"})]})},Ie=["value"];function ve(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const 
_e=e=>{let{value:n}=e,t=(0,i.A)(e,Ie);const a=new Date(n),{localeTimeString:s,localeDateString:c}=(0,G.$j)(),l=isNaN(a.valueOf())?"Missing date & time":"".concat(c(a,{long:!1})," ").concat(s(a,{secs:!0}));return(0,A.jsx)(r.Ay,function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?ve(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):ve(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({value:l},t))},Qe=["visualization","type","value","data"];function De(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const xe={bar:m,"bar-with-integer":m,duration:m,pill:y,number:r.Ay,string:r.Ay,feedTemplate:Te,datetime:_e,value:r.Ay},ke={bar:m,pill:y,value:r.Ay,richValue:d,feedTemplate:Te,rowOptions:"skip"},Re=e=>{let{visualization:n,type:t,value:a,data:s}=e,r=(0,i.A)(e,Qe);const c=ke[n]||xe[t]||ke.value;return c===ke.rowOptions?null:(0,A.jsx)(c,function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?De(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):De(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({value:a,type:t,rowOptions:null===s||void 0===s?void 0:s.rowOptions,data:s},r))},Se=["displayName","name","sortable","max","dummy","cellSize","valueOptions","summary","type","visualization","wrap"];function Pe(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Fe(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Pe(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Pe(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Ye={sum:"sum",min:"min",max:"max",extent:"extent",mean:"mean",median:"median",unique:"unique",uniqueCount:"uniqueCount",count:"count"},Ue=function(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:80;const t=String(e);return t.length>n?"".concat(t.slice(0,n),"\u2026"):t},Ne=e=>{let{row:n,columns:t}=e;return(0,A.jsx)(s.Flex,{gap:1,flexWrap:!0,children:Object.entries(n).filter((e=>{let[n]=e;return t[n]&&!t[n].dummy})).map((e=>{var n;let[o,i]=e;const a=(null===(n=t[o])||void 0===n?void 0:n.displayName)||o;if(null===i)return null;const r="object"===typeof i?JSON.stringify(i):i;return(0,A.jsxs)(s.TextSmall,{children:[(0,A.jsx)(s.TextSmall,{strong:!0,children:a})," ",Ue(r)]},o)}))})},je=(e,n,t)=>{let{groupBy:o,columnVisibility:r={}}=t;return(0,a.useMemo)((()=>{const t=((e,n)=>{var t;const o=Object.entries(e).filter((e=>{let[,n]=e;return n}));if(1!==o.length)return!1;const[i]=o[0],a=Object.keys(n).find((e=>n[e].displayName===i));return a&&"timestamp"===(null===(t=n[a])||void 0===t?void 0:t.type)})(r,n),a=(e||[]).reduce(((e,t)=>{const a=n[t];if(!a)return 
e;const{displayName:r,name:c,sortable:l,max:d,dummy:u,cellSize:h,valueOptions:m,summary:g,type:p,visualization:f,wrap:y=!1}=a,b=(0,i.A)(a,Se);return u?e:[...e,Fe(Fe(Fe({},b),{},{displayName:r,id:r,accessorFn:e=>e[t],cell:e=>{let{getValue:n,row:t,cell:i}=e;const a=i.getIsAggregated(),s=n(),r=a&&(Ye[g]===Ye.count||Ye[g]===Ye.uniqueCount||!Ye[g]);return(0,A.jsx)(Re,{description:c,data:t.original,maxValue:d,padding:t.depth>0?[0,0,0,2.5*t.depth]:[0],strong:a,value:r?"".concat(s,"x"):s,tooltipValue:r?"".concat(s," ").concat(1===s?"item":"items"," associated with ").concat(o," grouping"):s,valueOptions:r?Fe(Fe({},m),{},{transform:"none"}):m,type:p,visualization:f,wrap:y})},header:(0,A.jsx)(s.TextSmall,{strong:!0,children:r}),headerString:()=>r,enableSorting:!!l,aggregationFn:Ye[g]||Ye.count},h),{},{meta:Fe({},r!==c?{tooltip:c}:{})})]}),[]);return t&&a.push({id:"Summary",displayName:"Summary",accessorFn:()=>null,cell:e=>{let{row:t}=e;return(0,A.jsx)(Ne,{row:t.original,columns:n})},header:(0,A.jsx)(s.TextSmall,{strong:!0,children:"Summary"}),headerString:()=>"Summary",enableSorting:!1,size:500,minSize:200,maxSize:5e3,fullWidth:!0}),a}),[e,n,o,r])}},65687(e,n,t){"use strict";t.d(n,{$f:()=>b,Ay:()=>T,WT:()=>C,k4:()=>y});var o=t(80045),i=t(64467),a=(t(89463),t(33110),t(98992),t(54520),t(3949),t(96540)),s=t(51510),r=t(16099),A=t.n(r),c=t(42358),l=t(45087),d=t(98830),u=t(74848);const h=["icon","iconRotate","children","onClick","strong","testId","textAlign","wrap","truncate","rowOptions","ref","labelProps"],m=["description","tooltipValue","value","valueOptions","wrap","rowOptions","showRaw","type"];function g(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function p(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?g(Object(t),!0).forEach((function(n){(0,i.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):g(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const f=(0,s.default)(c.Icon).attrs({color:"text",height:"16px",width:"16px"}).withConfig({displayName:"value__Arrow",componentId:"sc-1apuy8c-0"})(["rotate:",";transition:all 200ms ease;"],(e=>{let{rotate:n}=e;return n})),y=function(e){let{transform:n,decimalPoints:t=0,units:o,defaultValue:i}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const s=(0,d.py)();return(0,a.useMemo)((()=>{switch(n){case"number":if(null===e)return{value:i,units:""};if(o){const n=(0,d.Im)(e,o);return{value:"number"===typeof n.value?(0,d.iX)(n.value,{decimalPoints:t}):n.value,units:n.units}}return{value:(0,d.iX)(e,{decimalPoints:t}),units:""};case"duration":return{value:(0,d.Im)(e,o||"s").value,units:""};case"datetime":return{value:s(e,{defaultValue:i}),units:""};case"datetime_usec":return{value:s(e,{defaultValue:i,units:"us"}),units:""};case"xml":return{value:A()(e||i||""),units:""};case"stringify":try{return{value:JSON.stringify(e),units:""}}catch(a){return{value:String(e||i||""),units:""}}default:return{value:null===e||"undefined"===typeof e?i:e,units:""}}}),[e,o,s])},b=e=>{let{description:n="",value:t,units:o=""}=e;return n?(0,u.jsxs)(c.Flex,{column:!0,gap:1,justifyContent:"center",children:[(0,u.jsx)(c.TextSmall,{children:n}),(0,u.jsxs)(c.TextSmall,{strong:!0,textAlign:"center",children:[t," ",o]})]}):"".concat(t,"
").concat(o)},E=(0,s.default)(c.Flex).withConfig({displayName:"value__ValueContainer",componentId:"sc-1apuy8c-1"})(["",""],(e=>{let{overflowWrap:n}=e;return n&&"* {\n      overflow-wrap: anywhere;\n      white-space: pre-wrap;\n      word-break: break-word;\n    }\n  "})),w={debug:{strong:!0,color:"textLite"},normal:{},notice:{strong:!0},warning:{strong:!0,color:"warningText"},critical:{strong:!0,color:"errorText"}},B=(0,s.default)(c.Text).attrs((e=>p(p({},e),w[e.severity]||w.normal))).withConfig({displayName:"value__ValueLabel",componentId:"sc-1apuy8c-2"})([""]),C=e=>{let{icon:n,iconRotate:t,children:i,onClick:a,strong:s,testId:r="cell",textAlign:A,wrap:c,truncate:l=!0,rowOptions:d,ref:m,labelProps:g}=e,y=(0,o.A)(e,h);return(0,u.jsxs)(E,p(p({cursor:a?"pointer":"inherit",gap:.5,onClick:a,overflow:"hidden",ref:m,width:{max:"100%",base:"100%"},flexWrap:!0,overflowWrap:c},y),{},{children:[n&&(0,u.jsx)(f,{name:n,rotate:t}),(0,u.jsx)(B,p(p(p({"data-testid":"".concat(r,"-value"),strong:s,textAlign:A,truncate:!c&&l,whiteSpace:c?"wrap":"nowrap"},g),d),{},{children:i}))]}))},M=e=>{let{value:n}=e;return(0,u.jsx)(c.Icon,{name:n?"checkmark_s":"x",color:n?"textFocus":"textNoFocus",width:"16px",height:"16px"})},T=e=>{let{description:n="",tooltipValue:t,value:i,valueOptions:a={},wrap:s,rowOptions:r,showRaw:A,type:d}=e,h=(0,o.A)(e,m);const g=y(i,a),f=g.value,E=g.units;return"boolean"===d?(0,u.jsx)(c.Flex,p(p({justifyContent:"center",alignItems:"center"},h),{},{children:(0,u.jsx)(M,{value:i})})):A&&"xml"!==(null===a||void 0===a?void 0:a.transform)?(0,u.jsxs)(c.Flex,{gap:3,justifyContent:"between",flex:!0,children:[(0,u.jsxs)(C,{wrap:s,rowOptions:r,justifyContent:"start",labelProps:{strong:!0},children:[f," ",E]}),f!==i&&(0,u.jsx)(C,{wrap:s,rowOptions:r,justifyContent:"end",labelProps:{color:"textDescription"},title:"Raw value",children:i})]}):(0,u.jsx)(l.A,p(p({content:!s&&(0,u.jsx)(b,{description:n,value:t||i,units:a.units}),"data-testid":"valueComponent",isBasic:!0},h),{},{children:(0,u.jsxs)(C,{wrap:s,rowOptions:r,justifyContent:"number"===a.transform?"end":"start",children:[f," ",(0,u.jsx)(c.TextSmall,{children:E})]})}))}},41073(e,n,t){"use strict";t.d(n,{A:()=>w,G:()=>E});var o=t(80045),i=t(64467),a=(t(26910),t(98992),t(54520),t(72577),t(3949),t(81454),t(8872),t(62953),t(88055)),s=t.n(a),r=t(72337),A=t(57377),c=t(52154);const l=["updateEvery","merge","reset","groupBy","items","direction","dataOnly","showIds","facetsDelta","histogramDelta","itemsDelta","aggregatedView"];function d(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function u(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?d(Object(t),!0).forEach((function(n){(0,i.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):d(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const h=90,m=110,g=160,p=1e3,f={minSize:1,maxSize:5e3},y=50,b={uniqueKey:{size:h,downLimit:h-y,upLimit:h+y},bar:{size:h,downLimit:h-y,upLimit:h+y},value:{size:m,downLimit:m-y,upLimit:m+y},feedTemplate:{size:p,downLimit:p-y,upLimit:p+y,minSize:1,maxSize:5e3,fullWidth:!0},pill:{size:m,downLimit:m-y,upLimit:m+y},timestamp:{size:g,downLimit:g-y,upLimit:g+y},datetime:{size:g,downLimit:g-y,upLimit:g+y}},E=(e,n)=>{var 
t,i,a,d,h;const{updateEvery:g,merge:p,reset:E,groupBy:w,items:B,direction:C,dataOnly:M,showIds:T=!1!==e.showIds,facetsDelta:I,histogramDelta:v,itemsDelta:_,aggregatedView:Q}=n,D=(0,o.A)(n,l);let x=E?n.columns:M?s()(e.columns):s()(p?(0,r.A)(n.columns,e.columns):n.columns),k=E?{}:u({},e.sortedColumnsObj);const R=D.hasHistory||e.hasHistory,S=M?e.pagination:n.pagination;let P=Object.keys(x).reduce(((n,t)=>{var o;const i=x[t],{name:a,sticky:s,visible:r,index:A,id:c=t}=i;return i.displayName=T?c||t:a,n.columnVisibility[i.displayName]=!E&&e.loaded?!!n.columnVisibility[i.displayName]:r,!s||!E&&e.loaded||n.pinnedColumns.push(i.displayName),i.valueOptions=i.valueOptions||{},i.cellSize=(e=>{let{visualization:n,type:t,fullWidth:o=!1,uniqueKey:i}=e;return u(u({},f),{},{fullWidth:o},(i&&"timestamp"!==t?b.uniqueKey:b[t])||b[n]||{minSize:1,maxSize:5e3,size:m,downLimit:m-y,upLimit:m+y})})(i),i.sortable=null!==(o=i.sortable)&&void 0!==o?o:i.sortable&&!(null!==S&&void 0!==S&&S.enabled),"undefined"!==typeof A&&(k[k[A]!==t?i.displayName:A]=t),n}),{columnVisibility:E?{}:u({},e.columnVisibility||{}),pinnedColumns:E?[]:[...e.pinnedColumns||[]]}),F=E||null===(t=e.alphabetical)||void 0===t||!t.length?Object.keys(x).sort(((e,n)=>(x[e].displayName||x[e].name).localeCompare(x[n].displayName||x[n].name,void 0,{sensitivity:"accent",ignorePunctuation:!0}))):e.alphabetical,Y=E?n.aggregations:p?s()(e.aggregations):n.aggregations||s()(e.aggregations);I&&Array.isArray(Y)&&I.forEach((e=>{const n=Y.find((n=>n.id===e.id));n?e.options.forEach((e=>{if(isNaN(e.count))return;const t=n.options.find((n=>n.id===e.id));t?t.count=(t.count||0)+((null===e||void 0===e?void 0:e.count)||0):n.options.push(e)})):Y.push(e)}));let U=E?n.histogram:p?s()(e.histogram):n.histogram||s()(e.histogram);if(v){var N;if(!E&&(null===(N=e.histogram)||void 0===N?void 0:N.id)!==(null===v||void 0===v?void 0:v.id))return;const n=U.chart.result.labels;if((0,A.Ay)(n,v.chart.result.labels))U.chart.result.data=[...U.chart.result.data,...v.chart.result.data];else{const e=((e,n)=>{const t=n.reduce(((n,t,o)=>{const i=e.findIndex((e=>t===e));return-1===i||(n[i]=o),n}),{});return n=>e.reduce(((e,o,i)=>("undefined"===typeof t[i]?e.push([0,0,0]):e.push(n[t[i]]),e)),[])})(n,v.chart.result.labels);v.chart.result.data.forEach((n=>U.chart.result.data.push(e(n))))}}let j={};Object.keys(n.columns).forEach((e=>{j[n.columns[e].index]=e}));let z=n.data.map((e=>e.reduce(((e,n,t)=>{const o=j[t];return o?(e[o]=n,e):e}),{}))),H=E?z:p?"forward"===C?z.concat(e.data||[]):(e.data||[]).concat(z):z;D.tail&&(H=H.slice(0,500));let O=null!==S&&void 0!==S&&S.enabled&&S.column&&Array.isArray(H)&&null!==(i=H[H.length-1])&&void 0!==i&&i[S.column]&&null!==(a=H[0])&&void 0!==a&&a[S.column]?{anchorBefore:H[H.length-1][S.column],anchorAfter:H[0][S.column],anchorUnits:S.units}:{};const L=n.defaultSortColumn||e.defaultSortColumn,G=(E?n.acceptedParams||[]:n.acceptedParams||e.acceptedParams).includes("direction");let J=0;Q&&(J=H.reduce(((e,n)=>e+(n[Q.column]||0)),0));const q=u(u(u(u(u(u(u({},e||c.zb.table),D),{},{groupByColumns:E?w:w||e.groupByColumns,data:H,columns:x,updatedAt:(new Date).getTime(),updateEvery:g||e.updateEvery,sortedColumns:Object.values(k).sort(((e,n)=>{var t,o;return(null===(t=x[e])||void 0===t?void 0:t.index)-(null===(o=x[n])||void 0===o?void 0:o.index)})),sortColumn:null===(d=x[L])||void 0===d?void 0:d.displayName,sortDirection:L?null===(h=x[L])||void 0===h?void 0:h.sort:"descending"},P),O),{},{totalSize:D.totalSize||(null===B||void 0===B?void
0:B.matched)||(!M||"forward"===C||z.length||n.partial?(e.totalSize||0)+(_?_.matched:0):0),actualSize:J},!(null===S||void 0===S||!S.enabled)&&{hasNextPage:!(M&&"forward"!==C&&!z.length&&!n.partial)&&(!B||0!==B.after)}),!(null===S||void 0===S||!S.enabled)&&G&&{hasPrevPage:!D.tail&&(!(M&&"forward"===C&&!z.length&&!n.partial)&&(!B||0!==B.before))}),{},{offset:p?null:(null===B||void 0===B?void 0:B.before)||null,loading:!1,loaded:!0,showIds:T,merged:p,latestDirection:C,hasHistory:R,hasDirection:G,aggregations:Y,histogram:U,sortedColumnsObj:k,filtersToRefresh:(D.requiredParams||[]).reduce(((e,n)=>n.uniqueView?u(u({},e),{},{[n.id]:!0}):e),{}),reset:E,aggregatedView:Q,alphabetical:F,transactionId:null});return k=null,U=null,Y=null,O=null,P=null,x=null,j=null,z=null,H=null,F=null,q},w=(e,n)=>{e((e=>E(e,n)))}},89975(e,n,t){"use strict";t.d(n,{A:()=>f});var o=t(64467),i=(t(26910),t(98992),t(54520),t(72577),t(3949),t(81454),t(8872),t(62953),t(96540)),a=t(51510),s=t(66118),r=t(44731),A=t(42358),c=t(64587),l=t(55164),d=t(74848);function u(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function h(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?u(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):u(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}s.t1.register(s.kc,s.PP,s.E8,s.FN,s.No,s.s$,s.m_,s.ZT,s.A6,s.Bs,s.Jb,s.P$,s.ju);const m=()=>(0,d.jsxs)(A.Flex,{gap:1,flex:"grow",background:"modalBackground",alignItems:"center",justifyContent:"center",children:[(0,d.jsx)(A.Icon,{name:"warning_triangle",color:"warning"}),(0,d.jsx)(A.Text,{children:"Something went wrong while loading this chart."})]});s.m_.positioners.follow=function(e,n){return null===e||void 0===e||!e.length||n.y>360?(this._resolveAnimations().update(this,{opacity:0}),!1):(0===this.opacity&&this._resolveAnimations().update(this,{opacity:1}),{x:n.x,y:n.y})};const g=e=>{let{data:n={},localeTimeString:t,theme:o}=e;const{chartType:i,datasets:a}=n;if("doughnut"===i)return{};if("bar"===i)return{x:{stacked:!0},y:{stacked:!0}};if("bubble"===i){const e=(a||[]).reduce(((e,n)=>{const t=((null===n||void 0===n?void 0:n.data)||[]).map((e=>{let{x:n}=e;return n})).sort(((e,n)=>e-n)),o=((null===n||void 0===n?void 0:n.data)||[]).map((e=>{let{y:n}=e;return n})).sort(((e,n)=>e-n)),i=null===t||void 0===t?void 0:t[0],a=null===t||void 0===t?void 0:t[(null===t||void 0===t?void 0:t.length)-1],s=null===o||void 0===o?void 0:o[0],r=null===o||void 0===o?void 0:o[(null===o||void 0===o?void 0:o.length)-1];return{minX:null===e.minX?i:Math.min(i,e.minX),maxX:null===e.maxX?a:Math.max(a,e.maxX),minY:null===e.minY?s:Math.min(s,e.minY),maxY:null===e.maxY?r:Math.max(r,e.maxY)}}),{minX:null,maxX:null,minY:null,maxY:null});return{x:{title:{display:!!n.xAxisLabel,text:n.xAxisLabel},min:e.minX-.5,max:e.maxX+.5},y:{title:{display:!!n.yAxisLabel,text:n.yAxisLabel},min:e.minY-.5}}}return{x:{ticks:{callback:function(e){const n=this.getLabelForValue(e);return n?t(1e3*n):""},color:(0,A.getColor)("textLite")({theme:o})}},y:{beginAtZero:!0,ticks:{color:(0,A.getColor)("textLite")({theme:o})}}}},p={area:"line",heatmap:"line"},f=e=>{let{data:n}=e;const t=(0,i.useContext)(a.ThemeContext),{localeTimeString:o}=(0,c.$j)(),s=(0,i.useMemo)((()=>{var 
e,t,o,i;const a="bar"===n.chartType?!1===n.stacked?((null===(e=n.datasets)||void 0===e||null===(e=e[0])||void 0===e?void 0:e.data)||[]).map((e=>{let{label:n}=e;return n})):null===(t=n.datasets)||void 0===t?void 0:t.map((e=>{let{label:n}=e;return n})):null===(o=n.datasets)||void 0===o||null===(o=o[0])||void 0===o?void 0:o.data.map((e=>e.t?e.t:e.dt?new Date(e.dt):null)),s="bar"===n.chartType?(e=>{if(!e)return{};const{datasets:n,stacked:t}=e;return!1===t?(e=>null!==e&&void 0!==e&&e.length?e.map((e=>{let{data:n,label:t,color:o}=e;return{label:t,data:(null===n||void 0===n?void 0:n.map((e=>{let{v:n}=e;return n})))||[],backgroundColor:(null===n||void 0===n?void 0:n.map((e=>e.color||o)))||[]}})):[])(n):(e=>{var n;return null!==e&&void 0!==e&&e.length?((null===(n=e[0])||void 0===n?void 0:n.data)||[]).map((n=>{let{label:t,color:o}=n;return{label:t,backgroundColor:o,stack:"Stack-0",data:e.map((e=>{var n;return null===(n=e.data.find((e=>e.label===t)))||void 0===n?void 0:n.v}))}})):[]})(n)})(n):null===(i=n.datasets)||void 0===i?void 0:i.map((e=>{const t={label:e.label,borderColor:e.color||"rgba(75, 192, 192, 1)",backgroundColor:e.color||"rgba(75, 192, 192, 0.2)"};if("doughnut"===n.chartType){const{data:n,colors:o,labels:i}=(e.data||[]).reduce(((e,n)=>{let{v:t,color:o,label:i}=n;return{data:[...e.data,t],colors:[...e.colors,o],labels:[...e.labels,i]}}),{data:[],colors:[],labels:[]});return h(h({},t),{},{data:n,labels:i},o.length?{backgroundColor:o}:{})}return"bubble"===n.chartType?h(h({},t),{},{data:(e.data||[]).map((e=>({x:e.x,y:e.y,r:e.r})))}):h(h({},t),{},{data:(e.data||[]).map((e=>e.v)),fill:"area"===n.chartType,tension:.1})}));return{labels:a||[],datasets:s||[]}}),[n]),u=(0,i.useMemo)((()=>g({data:n,localeTimeString:o,theme:t})),[n.chartType,o,t]),f=(0,i.useMemo)((()=>({interaction:{axis:"x"},plugins:{legend:{position:"bottom",align:"start",onClick:(e,t,o)=>{if("doughnut"===n.chartType){const e=t.index;o.chart.toggleDataVisibility(e),o.chart.update()}else{const e=o.legendItems.findIndex((e=>e.text===t.text));o.chart.isDatasetVisible(e)?o.chart.hide(e):o.chart.show(e)}},labels:{generateLabels:e=>{if("doughnut"===n.chartType){const o=s.datasets[0];return(o.labels||[]).map(((i,a)=>{var s;const r=o.data[a],c=(null===(s=n.datasets[0])||void 0===s?void 0:s.unit)||"",l=c?"".concat(i,": ").concat(r," ").concat(c):"".concat(i,": ").concat(r);return{fontColor:(0,A.getColor)("textLite")({theme:t}),text:l,fillStyle:Array.isArray(o.backgroundColor)?o.backgroundColor[a]:o.backgroundColor,strokeStyle:o.borderColor,hidden:!1===e.getDataVisibility(a),index:a}}))}return s.datasets.map(((n,o)=>({fontColor:(0,A.getColor)("textLite")({theme:t}),text:n.label,fillStyle:n.backgroundColor,strokeStyle:n.borderColor,pointStyle:"Daily count"===n.label?"rect":"line",hidden:!e.isDatasetVisible(o),order:n.order}))).sort(((e,n)=>e.order-n.order))},usePointStyle:!0}},tooltip:h(h({enabled:!0,mode:"nearest",yAlign:"bottom",position:"follow",backgroundColor:(0,A.getColor)("tooltip")({theme:t}),color:(0,A.getColor)("tooltipText")({theme:t})},"doughnut"!==n.chartType&&"bar"!==n.chartType?{intersect:!1,usePointStyle:!0}:{}),{},{callbacks:{title:e=>{const[t]=e;return t?"doughnut"===n.chartType?t.dataset.labels[t.dataIndex]:"bar"===n.chartType?t.label:"bubble"===n.chartType?"":o(1e3*t.label):""},labelPointStyle:e=>({pointStyle:"Total Nodes"===e.dataset.label?"rect":"line"}),label:e=>{var t;let o=e.dataset.label||"";o&&(o+=": "),"bubble"===n.chartType?o+="(".concat(e.raw.x,", ").concat(e.raw.y,", 
").concat(e.raw.r,")"):o+=e.formattedValue;const i=null===(t=n.datasets)||void 0===t?void 0:t[e.datasetIndex];return null!==i&&void 0!==i&&i.unit&&(o+=" ".concat(i.unit)),o}}})},responsive:!0,maintainAspectRatio:!1,scales:u})),[t,s,u]);return(0,d.jsxs)(A.Flex,{"data-testid":"insights-report-chart-container",column:!0,gap:6,margin:[4,0],children:[(0,d.jsx)(A.H4,{children:n.title}),(0,d.jsx)(A.Flex,{flex:!1,width:"100%",height:75,children:(0,d.jsx)(l.Ay,{fallback:m,children:(0,d.jsx)(r.t1,{type:p[n.chartType]||n.chartType,data:s,options:f})})})]})}},82326(e,n,t){"use strict";t.d(n,{Z:()=>o,l:()=>i});const o={HEADER:"HEADER",TEXT:"TEXT",CHART:"CHART",LOAD_CHART:"LOAD_CHART",LOAD_SYSTEMD_JOURNAL:"LOAD_SYSTEMD_JOURNAL",LOAD_WINDOWS_EVENTS:"LOAD_WINDOWS_EVENTS",MARKDOWN_TABLE:"MARKDOWN_TABLE",TABLE:"TABLE"},i={[o.LOAD_SYSTEMD_JOURNAL]:"systemd-journal",[o.LOAD_WINDOWS_EVENTS]:"windows-events"}},23392(e,n,t){"use strict";t.d(n,{A:()=>y});var o=t(64467),i=(t(98992),t(54520),t(3949),t(8872),t(96540)),a=t(42358),s=t(87398),r=t(24609),A=t(19186),c=t(87733),l=t(28657),d=t(74848);function u(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function h(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?u(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):u(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const m={node:"selectedNodes",context:"selectedContexts",dimension:"selectedDimensions",label:"selectedLabels",instance:"selectedInstances"},g=function(){return(arguments.length>0&&void 0!==arguments[0]?arguments[0]:[]).reduce(((e,n)=>{let{label:t,value:o}=n;const i=m[t];return i?h(h({},e),{},{[i]:Array.isArray(o)?o:[o]}):e}),{})},p=["dimension","node","instance"],f={avg:"average",default:"average"},y=e=>{let{index:n,chartId:t,id:o,data:u,variables:m,onFinishFetching:y}=e;const{before:b,after:E,context:w,groupBy:B,filterBy:C,timeAgg:M,valueAgg:T}=u||{},I=(0,i.useMemo)((()=>{const e=(B||[]).filter((e=>p.includes(e)));return e.length?e:["label"]}),[B]),v=(0,i.useMemo)((()=>(B||[]).filter((e=>!p.includes(e)))),[B]),{nodes:_=[]}=m||{},Q=(0,r.vt)(),D=(0,A.ID)(),x=(0,s.eg)(),k=(0,c.K)({spaceId:Q,roomId:D}),R=(0,i.useMemo)((()=>{const e=x.makeChart({attributes:h(h(h(h(h({before:new Date(b).getTime()/1e3,after:new Date(E).getTime()/1e3,aggregationMethod:T||"avg",groupingMethod:f[M]||f.default},w?{contextScope:[w]}:{}),_.filter((e=>"*"!==e)).length?{nodesScope:_}:{}),{},{groupBy:I},v?{groupByLabel:v}:{}),g(C)),{},{roomId:D,enabledResetRange:!1,host:k,expandable:!1})});return e.on("finishFetch",(()=>{"function"===typeof y&&t&&y(t)})),x.getRoot().appendChild(e),e}),[x,D,u,_,T,M]);return R?(0,d.jsx)(a.Flex,{flex:!1,width:"100%",height:75,margin:[4,0],children:(0,d.jsx)(l.A,{"data-chartid":"insights-report-".concat(o,"-block-").concat(n),chart:R})}):null}},37528(e,n,t){"use strict";t.d(n,{A:()=>x});var o=t(96540),i=t(51510),a=t(42358),s=t(24155),r=(t(26910),t(98992),t(54520),t(81454),t(1011)),A=t(74848);const 
c=e=>{let{columns:n}=e;return(0,A.jsx)("thead",{children:(0,A.jsx)("tr",{children:n.map((e=>{let{id:n,displayName:t}=e;return(0,A.jsx)("th",{children:t},n)}))})})},l=e=>{let{columns:n,data:t}=e;return(0,A.jsx)("tbody",{children:t.map(((e,t)=>(0,A.jsx)("tr",{children:n.map((n=>{let{id:t}=n;return(0,A.jsx)("td",{children:e[t]},t)}))},t)))})},d=e=>{let{data:n=[],dataColumns:t=[],columnVisibility:i={}}=e;const a=(0,r.A)(),s=(0,o.useMemo)((()=>t.filter((e=>{let{id:n}=e;return i[n]})).sort(((e,n)=>e.index-n.index))),[t,i]);return(0,A.jsxs)("table",{id:a,"data-report-table-id":a,"data-report-block-type":"TABLE",children:[(0,A.jsx)(c,{columns:s}),(0,A.jsx)(l,{data:n,columns:s})]})};var u=t(52035),h=t(34843),m=t(18790),g=t(25316),p=t(19186),f=(t(3949),t(64467)),y=t(91130),b=t(15505),E=t(39175);function w(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function B(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?w(Object(t),!0).forEach((function(n){(0,f.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):w(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}var C=t(41073),M=t(52154),T=t(57377);const I=(0,m.I)((e=>{let{roomId:n,nodeId:t,fn:o,payload:i}=e;return(0,u.eU)((()=>(e=>{let{roomId:n,nodeId:t,fn:o,payload:i={}}=e;const a=(0,E.tB)(n);return y.A.post(a?"/host/".concat(t,"/api/v3/function?function=").concat(o):"/api/v2/nodes/".concat(t,"/function?function=").concat(o),B(B({},i),{},{timeout:12e4,last:200}),B({transform:b.Ds},a&&{baseURL:window.envSettings.agentApiUrl}))})({roomId:n,nodeId:t,fn:o,payload:i})))}),T.Ay);var v=t(61533);t(8872),t(62953);function _(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Q(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?_(Object(t),!0).forEach((function(n){(0,f.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):_(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const D=(0,i.default)(a.Table).withConfig({displayName:"loadFunction__StyledTable",componentId:"sc-qw5wa4-0"})(["*{font-family:monospace;letter-spacing:0.09px;}"]),x=e=>{let{id:n,data:t,fn:i="systemd-journal",isPrintable:r,onFinishFetching:c}=e;const{after:l,before:u,filterBy:m,nodeId:f}=t||{},y=(e=>{let{after:n,before:t,filterBy:o=[]}=e;const i=o.reduce(((e,n)=>Q(Q({},e),{},{[n.key]:n.values})),{});return{after:n?new Date(n).getTime():0,before:t?new Date(t).getTime():0,selections:i}})({after:l,before:u,filterBy:m}),{loaded:b,value:E,error:w}=(e=>{var n;let{nodeId:t,fn:i,payload:a}=e;const s=(0,p.ID)(),r=(0,h.md)((0,g.A)(I({roomId:s,nodeId:t,fn:i,payload:a}))),A=(0,o.useMemo)((()=>{const{state:e,data:n}=r;return"loading"===e||"hasError"===e?null:(0,C.G)(M.zb.table,null===n||void 0===n?void 0:n.data)}),[r]);return{loaded:"loading"!==r.state,value:A,error:"hasError"===r.state?(null===(n=r.error)||void 0===n||null===(n=n.response)||void 0===n||null===(n=n.data)||void 0===n?void 0:n.errorMessage)||"Something went 
wrong":""}})({nodeId:f,fn:i,payload:y}),{data:B,columns:T,sortedColumns:_,columnVisibility:x}=E||{},k=(0,v.A)(_,T,{groupBy:""});return(0,o.useEffect)((()=>{b&&"function"===typeof c&&n&&c(n)}),[n,b,c]),b?w?(0,A.jsx)(a.Flex,{children:(0,A.jsxs)(a.Text,{color:"textLite",children:["Error: ",w]})}):B&&B.length?r?(0,A.jsx)(a.Flex,{children:(0,A.jsx)(d,{data:B,dataColumns:k,columnVisibility:x})}):(0,A.jsx)(a.Flex,{children:(0,A.jsx)(D,{data:B,dataColumns:k,columnVisibility:x})}):(0,A.jsx)(a.Flex,{children:(0,A.jsx)(a.Text,{color:"textLite",children:"No data"})}):(0,A.jsxs)(a.Flex,{column:!0,gap:2,children:[(0,A.jsxs)(a.Flex,{gap:2,children:[(0,A.jsx)(s.A,{width:"40px"}),(0,A.jsx)(s.A,{width:"300px"})]}),(0,A.jsxs)(a.Flex,{gap:2,children:[(0,A.jsx)(s.A,{width:"40px"}),(0,A.jsx)(s.A,{width:"300px"})]}),(0,A.jsxs)(a.Flex,{gap:2,children:[(0,A.jsx)(s.A,{width:"40px"}),(0,A.jsx)(s.A,{width:"300px"})]})]})}},83341(e,n,t){"use strict";t.d(n,{A:()=>p});var o=t(96540),i=t(54852),a=t(64467),s=t(80045),r=(t(98992),t(54520),t(3949),t(81454),t(32277)),A=t(1011),c=t(74848);const l=["children"];function d(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function u(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?d(Object(t),!0).forEach((function(n){(0,a.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):d(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const h=e=>{let{children:n}=e,t=(0,s.A)(e,l);const o=(0,A.A)();return(0,c.jsx)("table",u(u({id:o,"data-report-table-id":o,"data-report-block-type":"TABLE"},t),{},{children:n}))},m=e=>{let{content:n=""}=e;const t=n.split("\n");return(0,c.jsx)("div",{"data-report-block-type":"CODE",children:t.map(((e,n)=>(0,c.jsx)("p",{className:"code-line",children:e},n)))})},g=function(){let{isPrintable:e}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return u(u({},r.A),{},{fence:u(u({},r.A.fence),e?{render:m}:{})},e?{table:{render:h}}:{})},p=(0,o.memo)((e=>{let{isPrintable:n,children:t}=e;return(0,c.jsx)(i.A,{transformConfiguration:{nodes:g({isPrintable:n})},children:t})}))},3066(e,n,t){"use strict";t.d(n,{A:()=>H});var o=t(64467),i=t(80045),a=(t(98992),t(23215),t(54520),t(3949),t(81454),t(8872),t(62953),t(96540)),s=t(63950),r=t.n(s),A=t(42358),c=t(89975),l=t(23392),d=t(37528);const u=t(51510).default.table.withConfig({displayName:"styled__ReportTable",componentId:"sc-1a193ss-0"})(["width:100%;border-collapse:collapse;border:1px solid ",";& th{padding:8px;}& td{padding:4px 8px;}& th,& td{border:1px solid ",";}"],(0,A.getColor)("border"),(0,A.getColor)("border"));var h=t(1011),m=t(74848);const g=e=>{let{data:n,isPrintable:t}=e;const o=(0,h.A)();return(0,m.jsxs)(A.Flex,{column:!0,gap:6,margin:[4,0],children:[t?null:(0,m.jsx)(A.H4,{children:n.title}),(0,m.jsxs)(u,{id:o,"data-report-table-id":o,children:[t?(0,m.jsx)("caption",{children:n.title}):null,(0,m.jsx)("thead",{children:(0,m.jsx)("tr",{children:n.columns.map(((e,n)=>(0,m.jsx)("th",{children:e},n)))})}),(0,m.jsx)("tbody",{children:n.rows.map(((e,n)=>(0,m.jsx)("tr",{children:e.map(((e,n)=>(0,m.jsx)("td",{children:e},n)))},n)))})]})]})};var p=t(83341),f=t(27955),y=t(17702);function b(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var 
o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function E(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?b(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):b(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const w=e=>{let{cta:n,checkSelection:t,onClick:o,children:i}=e;return(0,m.jsxs)("div",{id:"insights-report-content-text-selection",onMouseUp:t,children:[i,n?(0,m.jsx)(A.Flex,E(E({background:"mainBackground",position:"fixed",round:!0},n),{},{children:(0,m.jsx)(y.A,{icon:"conversationSubject",label:"Start a conversation",flavour:"hollow",onClick:o})})):null]})};var B=t(81685),C=t(75250),M=t(42849),T=t(41395),I=t(79022),v=(t(42762),t(82326));var _=t(99094),Q=t(6304);const D=["onIntersection"];function x(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function k(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?x(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):x(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const R=(S=A.H1,e=>{let{onIntersection:n}=e,t=(0,i.A)(e,D);const o=(0,a.useRef)();return((e,n)=>{const t=(0,a.useMemo)((()=>new IntersectionObserver((t=>{var o;let[i]=t;n({id:null===e||void 0===e||null===(o=e.current)||void 0===o||null===(o=o.dataset)||void 0===o?void 0:o.id,isIntersecting:i.isIntersecting,boundingClientRect:i.boundingClientRect,intersectionRatio:i.intersectionRatio})}),{threshold:[0,.5,1]})),[e]);(0,a.useEffect)((()=>(null!==e&&void 0!==e&&e.current&&t.observe(e.current),()=>t.disconnect())),[])})(o,n),(0,m.jsx)("div",{ref:o,"data-id":t.id,children:(0,m.jsx)(S,k({},t))})});var S;const P=()=>(0,m.jsx)("img",{height:"40px",src:"data:image/png;base64,".concat(_.EF),alt:"Netdata logo"}),F=e=>{let{reportId:n,definitionId:t,createdAt:o,variables:i,onDefinitionsLoaded:s=r()}=e;const c=(0,C.A)({id:n}),l=(0,M.A)({reportDate:o?new Date(o):null}),{loaded:d,definition:u}=(0,B.fL)({id:t}),{name:h}=u||{},g=(0,I.p_)(i);return(0,a.useEffect)((()=>{d&&s()}),[d,s]),d?(0,m.jsxs)(A.Flex,{column:!0,gap:2,border:{side:"bottom",color:"border"},padding:[4,0],margin:[0,0,4,0],children:[(0,m.jsx)(A.Flex,{children:(0,m.jsx)(P,{})}),(0,m.jsxs)(A.Text,{strong:!0,children:[h," Report"]}),(0,m.jsxs)(A.Flex,{column:!0,gap:1,padding:[0,0,0,2],children:[(0,m.jsxs)(A.TextSmall,{children:["Time period: ",(0,m.jsxs)(A.TextSmall,{strong:!0,children:["Last ",g]})]}),(0,m.jsxs)(A.TextSmall,{children:["Created at: ",(0,m.jsx)(A.TextSmall,{strong:!0,children:l})]}),(0,m.jsxs)(A.TextSmall,{children:["Report ID: ",(0,m.jsx)(A.TextSmall,{strong:!0,children:n})]}),(0,m.jsxs)(A.TextSmall,{children:["Report name: ",(0,m.jsx)(A.TextSmall,{strong:!0,children:c})]})]})]}):null},Y=()=>(0,m.jsx)(A.Flex,{alignItems:"center",gap:2,border:{side:"top",color:"border"},padding:[8,0],margin:[4,0,0,0],children:(0,m.jsx)(A.Text,{fontSize:"14px",children:"Created with \ud83d\udc9a by 
Netdata."})}),U={margin:[6,0,2,0],fontSize:"18px",color:"text"},N=[v.Z.LOAD_CHART,v.Z.LOAD_SYSTEMD_JOURNAL],j=(e=>n=>n.isPrintable?(0,m.jsx)("div",{"data-block-type":n.type,children:(0,m.jsx)(e,k({},n))}):(0,m.jsx)(e,k({},n)))((e=>{let{id:n,type:t,data:o,index:i,isPrintable:a,onIntersection:s,onFinishFetchingChart:r,report:u={}}=e;const{id:h,variables:f}=u;return t===v.Z.HEADER?a?(0,m.jsx)(A.H2,k(k({},U),{},{children:o}),o):(0,m.jsx)(R,{id:(0,T.Yv)(o,{noLowerCase:!0}),margin:[0===i?0:4,0,0,0],onIntersection:s,children:o},o):t===v.Z.TEXT||t===v.Z.MARKDOWN_TABLE?(0,m.jsx)(p.A,{isPrintable:a,children:o},i):t===v.Z.LOAD_CHART?(0,m.jsx)(l.A,{index:i,chartId:n,id:h,data:o,variables:f,onFinishFetching:r},i):t===v.Z.CHART?(0,m.jsx)(c.A,{data:o},i):t===v.Z.TABLE?(0,m.jsx)(g,{data:o,isPrintable:a},i):t===v.Z.LOAD_SYSTEMD_JOURNAL||t===v.Z.LOAD_WINDOWS_EVENTS?(0,m.jsx)(d.A,{id:n,fn:v.l[t],data:o,onFinishFetching:r,isPrintable:a},i):null})),z=(0,a.memo)((e=>{let{flavour:n,id:t,definitionId:o,createdAt:i,data:s={},variables:A={},onIntersection:c,onReady:l=r(),textSelectionProps:d={}}=e;const u="printable"===n,h=function(){return(arguments.length>0&&void 0!==arguments[0]?arguments[0]:[]).reduce(((e,n)=>{if(n.type!==v.Z.TEXT)return[...e,n];const t=n.data.split("\n"),o=[];let i=[],a=[],s=!1;for(const r of t)if(r.trim().startsWith("|"))!s&&i.length>0&&(o.push({type:v.Z.TEXT,data:i.join("\n")+"\n"}),i=[]),s=!0,a.push(r);else{if(s&&a.length>0&&(o.push({type:v.Z.MARKDOWN_TABLE,data:a.join("\n")+(""===r?"\n\n":"")}),a=[],s=!1,""===r))continue;i.push(r)}return a.length>0?o.push({type:v.Z.MARKDOWN_TABLE,data:a.join("\n")}):i.length>0&&o.push({type:v.Z.TEXT,data:i.join("\n")}),[...e,...o]}),[])}((null===s||void 0===s?void 0:s.blocks)||[]),g=h.map(((e,n)=>k(k({},e),{},{id:"".concat(e.type,"-").concat(n)}))),[p,,y]=(0,Q.A)(),[b,,E]=(0,Q.A)(!u),B=(e=>{let{isPrintable:n,blocks:t,onFinishFetching:o}=e;const[i,s]=(0,a.useState)([]),A=(0,a.useMemo)((()=>t.reduce(((e,n)=>[...e,...N.includes(n.type)?[n.id]:[]]),[])),[t]),c=(0,a.useMemo)((()=>!A.length||i.length===A.length&&A.every((e=>i.includes(e)))),[i,A]);return(0,a.useEffect)((()=>{c&&"function"===typeof o&&o()}),[c]),(0,a.useMemo)((()=>n?e=>{s((n=>n.includes(e)?n:[...n,e]))}:r()),[n])})({isPrintable:u,blocks:g,onFinishFetching:y});return(0,a.useEffect)((()=>{p&&b&&l()}),[p,b,l]),(0,m.jsxs)(m.Fragment,{children:[u?(0,m.jsx)(F,{reportId:t,definitionId:o,createdAt:i,variables:A,onDefinitionsLoaded:E}):(0,m.jsx)(f.A,{id:t}),(0,m.jsx)(w,k(k({},d),{},{children:g.map(((e,n)=>{let{id:o,type:i,data:a}=e;return(0,m.jsx)(j,{id:o,type:i,data:a,isPrintable:u,onIntersection:c,onFinishFetchingChart:B,report:{id:t,variables:A}},n)}))})),u?(0,m.jsx)(Y,{}):null]})})),H=z},27955(e,n,t){"use strict";t.d(n,{A:()=>M});var o=t(64467),i=t(80045),a=(t(98992),t(54520),t(3949),t(96540)),s=t(42358),r=t(81685),A=(t(72577),t(81454),t(24155)),c=t(74848);const l=()=>(0,c.jsxs)(s.Flex,{column:!0,gap:2,children:[(0,c.jsx)(A.A,{width:"80%"}),(0,c.jsx)(A.A,{width:"100%"})]});var d=t(15505);function u(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function h(e){for(var n=1;n<arguments.length;n++){var 
t=null!=arguments[n]?arguments[n]:{};n%2?u(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):u(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const m=(0,t(74891).A)(s.Flex),g=e=>{let{name:n,rawValue:t}=e;return(0,c.jsxs)(s.Flex,{column:!0,gap:2,background:"tooltip",padding:[2],round:!0,children:[(0,c.jsx)(s.Text,{children:n}),Array.isArray(t)&&"*"!==t[0]?(0,c.jsx)(s.Flex,{column:!0,gap:2,children:t.map((e=>(0,c.jsx)(s.Text,{color:"menuItem",children:e},e)))}):null]})},p=e=>{let{id:n,name:t,fieldPrefix:o,variables:i}=e;const r=(0,a.useMemo)((()=>i[(0,d.Gb)(n)]),[n,i]),A=(0,a.useMemo)((()=>Array.isArray(r)?1===r.length&&"*"===r[0]?"All":"".concat(r.length," selected"):r),[r]);return(0,c.jsxs)(s.Flex,{alignItems:"center",gap:2,children:[o?(0,c.jsx)(s.Text,{children:o}):null,(0,c.jsx)(m,{height:"28px",alignItems:"center",padding:[0,3],border:{side:"all",color:"border"},round:!0,cursor:"pointer",tooltip:(0,c.jsx)(g,{name:t,rawValue:r}),tooltipProps:{align:"bottom"},children:(0,c.jsx)(s.Text,{color:"menuItem",children:A})})]})},f=e=>{let{definitionId:n,variables:t}=e;const{loaded:o,definitions:i,error:A}=(0,r.A_)(),d=(0,a.useMemo)((()=>o&&!A?i.find((e=>{let{id:t}=e;return t===n})):null),[n,i]);return o?A?(0,c.jsx)(s.Text,{color:"textLite",children:"string"===typeof A?A:"Failed to load definition"}):d?(0,c.jsx)(s.Flex,{alignItems:"center",gap:2,children:d.variables.map((e=>(0,c.jsx)(p,h(h({},e),{},{variables:t}),e.id)))}):(0,c.jsx)(s.Text,{color:"textLite",children:"No definition found"}):(0,c.jsx)(l,{})};const y=t(51510).default.blockquote.withConfig({displayName:"investigationDetails__Blockquote",componentId:"sc-ebzbrr-0"})(["font-style:italic;padding:0 12px;"]),b=e=>{let{variables:n}=e;const{reportDescription:t}=n||{};return t?(0,c.jsx)(y,{children:t}):"No report description available"};var E=t(99094);const w=["id"];function B(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function C(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?B(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):B(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const M=e=>{let{id:n}=e,t=(0,i.A)(e,w);const o=(0,r.CF)({id:n,nested:!1}),{definitionId:A,variables:l}=o||{},d=(0,a.useMemo)((()=>A===E.oG.investigation?b:A===E.oG.alertInvestigation?null:f),[A]);return A&&Object.values(l||{}).filter(Boolean).length&&d?(0,c.jsxs)(s.Flex,C(C({column:!0,gap:2,background:"panelBg",padding:[4],round:!0},t),{},{children:[(0,c.jsx)(s.Text,{color:"menuItem",strong:!0,children:"Report details"}),(0,c.jsx)(s.Flex,{height:{max:40},overflow:{vertical:"auto"},children:(0,c.jsx)(d,{definitionId:A,variables:l})})]})):null}},46256(e,n,t){"use strict";t.d(n,{A:()=>p,D:()=>g});t(98992),t(54520),t(3949);var o=t(64467),i=t(80045),a=t(96540),s=t(42358),r=t(74891),A=t(48329),c=t(34113),l=t(74848);const d=["icon","flavour","toolVisible","ref","Component"];function u(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return 
Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function h(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?u(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):u(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const m=(0,r.A)(s.Icon),g=e=>{let{icon:n,flavour:t,toolVisible:o,ref:s,Component:r}=e,A=(0,i.A)(e,d);const c=(0,a.useMemo)((()=>({name:n,cursor:"pointer",color:"menuItem",width:"18px",height:"18px",tooltipProps:{align:"bottom"},noWrapper:!0})),[t,n]);return r&&o?(0,l.jsx)(r,h(h({ref:s},c),A)):o&&n&&A.onClick?(0,l.jsx)(m,h(h({ref:s},c),A)):null},p=(0,A.A)((0,c.A)(g))},17762(e,n,t){"use strict";t.d(n,{Y:()=>o,o:()=>i});const o={addToConversation:"addToConversation",openReport:"openReport",info:"info",share:"share",download:"download",schedule:"schedule",close:"close",delete:"delete"},i={COMPLETED:Object.values(o),FAILED:[o.delete,o.addToConversation],default:[]}},68726(e,n,t){"use strict";t.d(n,{A:()=>T});var o=t(64467),i=(t(98992),t(54520),t(3949),t(62953),t(96540)),a=t(42358),s=(t(8872),t(30569)),r=t(24609),A=t(19186),c=t(16922),l=t(81685),d=t(58247),u=t(63872);function h(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function m(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?h(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):h(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const g=function(){let{id:e,onSuccess:n,onFail:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const o=(0,r.vt)(),a=(0,A.ID)(),h=(0,l.CF)({id:e}),{childrenReports:g}=h||{},p=!(null===h||void 0===h||!h.rrule),f=p?d.Zp:d.y8,[y,b]=(0,u.A)(),E=(0,s.yF)((0,i.useCallback)(((n,t)=>{t((0,c.Tj)({spaceId:o,roomId:a}),p?n=>m(m({},n),{},{reports:n.reports.reduce(((n,t)=>(t.id===e?g.forEach((e=>{n.push(e)})):n.push(t),n)),[])}):n=>m(m({},n),{},{reports:(n.reports||[]).filter((n=>n.id!==e))}))}),[e,o,a,p,g]));return(0,i.useCallback)((()=>{f({spaceId:o,roomId:a,reportId:e}).then((e=>{let{data:t}=e;y({header:"Success",text:"You have successfully deleted the report."}),E(),"function"===typeof n&&n(t)})).catch((e=>{b(e),"function"===typeof t&&t(e)}))}),[e,o,a,E,n,t,y,b])};var p=t(46256),f=t(48329),y=t(34113),b=t(99728),E=t(6304),w=t(3319),B=t(74848);function C(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function M(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?C(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):C(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const T=(0,f.A)((0,y.A)((e=>{let{reportId:n,flavour:t,toolVisible:o,onDelete:s,onSuccess:r}=e;const 
A=(0,b.JT)("insights:DeleteReport"),c=g({id:n,onSuccess:r}),[l,,d,u]=(0,E.A)(),{sendLog:h}=(0,w.A)(),m=(0,i.useMemo)((()=>({tooltip:A?"Delete report":"You don't have permissions to delete the report",disabled:!A})),[A]),f=(0,i.useCallback)((()=>{A&&d()}),[A,d]),y=(0,i.useCallback)((()=>{c(),"function"===typeof s&&s(),h({feature:"Insights",description:"Confirm report deletion",reportId:n})}),[n,c,s,h]);return o?(0,B.jsxs)(B.Fragment,{children:[(0,B.jsx)(p.D,M({toolVisible:o,flavour:t,icon:"trashcan",label:"Delete",width:"14px",height:"14px",color:"error",onClick:f,danger:!0},m)),l?(0,B.jsx)(a.ConfirmationDialog,{title:"Delete report?",handleConfirm:y,handleDecline:u,confirmLabel:"Yes, delete",declineLabel:"No",backdropContainerProps:{zIndex:"120","data-drop":"insights-report-item-menu"},message:(0,B.jsxs)(a.Flex,{column:!0,gap:2,children:[(0,B.jsx)(a.TextBig,{children:"You are going to delete a report and this action cannot be reverted."}),(0,B.jsx)(a.TextBig,{children:"Are you sure that you want to delete this report?"})]})}):null]}):null})))},35596(e,n,t){"use strict";t.d(n,{A:()=>un});var o=t(64467),i=(t(98992),t(54520),t(3949),t(62953),t(96540)),a=t(42358),s=t(17178),r=t(47413),A=t(44245),c=t(81685),l=t(96951),d=t(50100),u=t(46256),h=t(17762),m=t(74848);const g=e=>{let{status:n,flavour:t,onClick:o,tooltip:i,logPayload:a={}}=e;return(0,m.jsx)(u.A,{tool:h.Y.addToConversation,status:n,flavour:t,icon:"conversationSubject",color:"primaryAI",label:"Add to conversation",onClick:o,tooltip:i,logPayload:a})};var p=t(68726),f=t(80045),y=(t(33110),t(81454),t(51510));const b=(0,y.default)(a.Box).withConfig({displayName:"variables__VariablesGrid",componentId:"sc-y7si6q-0"})(["display:grid;grid-template-columns:fit-content(120px) auto;column-gap:8px;row-gap:6px;"]),E=(0,y.default)(a.Text).withConfig({displayName:"variables__MonospaceText",componentId:"sc-y7si6q-1"})(["font-family:monospace;"]),w=e=>{let{label:n,value:t}=e;const o=(0,i.useMemo)((()=>Array.isArray(t)?1===t.length&&"*"===t[0]?"All":JSON.stringify(t):t),[t]);return(0,m.jsxs)(m.Fragment,{children:[(0,m.jsxs)(E,{color:"menuItem",children:[n,":"]}),(0,m.jsx)(E,{children:o})]})},B=function(){let{variables:e={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return(0,m.jsx)(b,{children:Object.entries(e).map((e=>{let[n,t]=e;return t?(0,m.jsx)(w,{label:n,value:t},n):null}))})};var C=t(48329),M=t(34113),T=t(6304);const I=["onClick"];function v(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function _(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?v(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):v(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Q=(0,C.A)((0,M.A)((e=>{let{onClick:n}=e,t=(0,f.A)(e,I);const o=(0,i.useRef)(),[s,r,,A]=(0,T.A)(!1),c=(0,i.useCallback)((()=>{"function"===typeof 
n&&n(),r()}),[n,r]);return(0,m.jsxs)(m.Fragment,{children:[(0,m.jsx)(u.D,_({ref:o,onClick:c},t)),s&&o.current?(0,m.jsx)(a.Drop,{target:o.current,align:{top:"bottom",right:"right"},background:"modalBackground",margin:[2,0,0],round:.5,close:A,onClickOutside:A,onEsc:A,children:(0,m.jsxs)(a.Flex,{width:{min:80,max:200},column:!0,gap:2,padding:[4],children:[(0,m.jsx)(a.TextBig,{strong:!0,children:"Report id"}),(0,m.jsx)(a.Text,{children:t.reportId}),(0,m.jsx)(a.TextBig,{strong:!0,children:"Query details"}),(0,m.jsx)(B,{variables:t.variables})]})}):null]})})));var D=t(21996),x=(t(8872),t(6084));const k=[{id:"name",accessor:"name",header:"Name",cell:e=>{let{getValue:n}=e;return"".concat(n())}},{id:"user",accessor:"user",width:300,header:"Users",cell:e=>{let{getValue:n}=e;const{name:t,avatarURL:o,email:i}=n();return(0,m.jsxs)(a.Flex,{alignItems:"center",gap:2,children:[(0,m.jsx)(x.A,{src:o,title:"".concat(t," - ").concat(i||"email not set")}),(0,m.jsx)(a.TextSmall,{children:t})]})}},{id:"email",accessor:"email",header:"Email",width:300,align:"center",cell:e=>{let{getValue:n}=e;return(0,m.jsx)(a.TextSmall,{children:n()})}}],R=e=>{let{members:n=[],setUserIds:t}=e;const o=(0,i.useCallback)((e=>{const o=Object.entries(e||{}).reduce(((e,t)=>{var o;let[i,a]=t;return[...e,...a?[null===(o=n[i])||void 0===o?void 0:o.id].filter(Boolean):[]]}),[]);t(o)}),[n,t]);return(0,m.jsx)(a.Table,{data:n,dataColumns:k,columnVisibility:{name:!1},enableSorting:!0,enableSelection:!0,onRowSelectionChange:o})},S=()=>(0,m.jsxs)(a.Flex,{gap:2,height:{min:"300px"},alignItems:"center",justifyContent:"center",padding:[8],children:[(0,m.jsx)(a.Icon,{name:"warning_triangle",color:"warning"}),(0,m.jsx)(a.Text,{children:"You need to be signed in to share a report."})]});var P=t(79748);const F=e=>{let{onStartInviting:n}=e;return(0,m.jsx)(a.Flex,{height:{min:"300px"},alignItems:"center",justifyContent:"center",padding:[8],children:(0,m.jsx)(a.Flex,{column:!0,gap:4,width:{max:"500px"},children:(0,m.jsxs)(a.TextBig,{textAlign:"center",lineHeight:1.5,children:["There are no available members in this room or there are no members that can read a report based on their role. 
Please"," ",(0,m.jsx)(P.A,{Component:a.TextBig,onClick:n,children:"invite"})," ","some users to this room with the appropriate role."]})})})};var Y=t(62718),U=t(46587),N=t(86706),j=t(82505);function z(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function H(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?z(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):z(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const O={admin:!0,manager:!0},L={admin:!0,manager:!0,troubleshooter:!0,member:!0,observer:!0},G=()=>{const e=(0,N.bj)(),n=(0,j.lb)();return(0,i.useMemo)((()=>e.reduce(((e,t)=>O[t.role]||n.includes(t.id)&&L[t.role]?[...e,H(H({},t),{},{user:{avatarURL:t.avatarURL,deactivated:t.deactivated,name:t.name,email:t.email,id:t.id}})]:e),[])),[e,n])};t(9391);var J=t(24609),q=t(19186),K=t(58247),V=t(63872);function X(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function W(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?X(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):X(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Z=function(){let{id:e,onSuccess:n,onFail:t,onSettle:o}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const a=(0,J.vt)(),s=(0,q.ID)(),[r,A]=(0,V.A)();return(0,i.useCallback)((i=>{let{ids:c,message:l}=i;(c||[]).length&&(0,K.e7)({spaceId:a,roomId:s,reportId:e,payload:W({recipient_ids:c},l?{message:l}:{})}).then((e=>{let{data:t}=e;r({header:"Success",text:"You have successfully shared the report."}),"function"===typeof n&&n(t)})).catch((e=>{A(e),"function"===typeof t&&t(e)})).finally((()=>{"function"===typeof o&&o()}))}),[e,a,s,n,t,r,A])};var $=t(10444),ee=t(88307),ne=t(74891),te=t(3319);const oe=(0,ne.A)(a.Button),ie=e=>{let{reportId:n,startIsInviting:t,closeModal:o}=e;const s=(0,$.J)(),r=(0,U.uW)("isAnonymous"),A=G(),[c,l]=(0,i.useState)([]),[u,h]=(0,i.useState)(""),[g,,p,f]=(0,T.A)(),y=Z({id:n,onSuccess:o,onSettle:f}),{getUrl:b}=(0,d.A)(),E=!!c.length,w=!!A.length,B=(0,i.useMemo)((()=>E?"Share report with ".concat(c.length," members"):"Please select some memebers to share the report.")),{sendLog:C}=(0,te.A)(),M=(0,i.useCallback)((e=>{h(e)}),[h]),I=(0,i.useCallback)((()=>{o(),t()}),[t,o]),v=(0,i.useCallback)((()=>{const e=b(n);(0,ee.C)(e,{text:"URL copied to your clipboard.",icon:"checkmark_s"})(),C({feature:"Insights",description:"Copy report url",reportId:n,url:e})}),[n,b,C]),_=(0,i.useCallback)((()=>{p(),y({ids:c,message:u}),C({feature:"Insights",description:"Share report",reportId:n,userIds:c.join(","),message:u})}),[n,y,c,u,p,C]);return(0,m.jsx)(a.Modal,{"data-testid":"insights-share-modal",backdropProps:{backdropBlur:!0},onClickOutside:o,onEsc:o,children:(0,m.jsxs)(a.ModalContent,{width:{min:"600px",max:s?"90vw":"720px"},height:{min:"400px",max:s?"90vh":"600px"},children:[(0,m.jsxs)(a.ModalHeader,{children:[(0,m.jsx)(a.H3,{children:"Share 
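/*
 * Editorial note: the role maps O and L above gate who can receive a shared
 * report. Admins and managers always qualify, while troubleshooters, members
 * and observers qualify only when they already have access to the report; the
 * share hook then posts recipient_ids plus an optional message. A readable
 * sketch follows; the function names are assumptions drawn from the visible
 * logic and payload, not a documented API.
 */
const ALWAYS_ELIGIBLE = { admin: true, manager: true };
const ELIGIBLE_WITH_ACCESS = {
  admin: true,
  manager: true,
  troubleshooter: true,
  member: true,
  observer: true,
};

// Mirrors the reduce above: O[t.role] || n.includes(t.id) && L[t.role]
function eligibleRecipients(members, idsWithAccess) {
  return members.filter(
    (m) =>
      ALWAYS_ELIGIBLE[m.role] ||
      (idsWithAccess.includes(m.id) && ELIGIBLE_WITH_ACCESS[m.role])
  );
}

// Shape of the share request body built above; the message field is only
// attached when non-empty.
function buildSharePayload(recipientIds, message) {
  return { recipient_ids: recipientIds, ...(message ? { message } : {}) };
}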
report"}),(0,m.jsx)(a.ModalCloseButton,{testId:"close-button",onClose:o})]}),(0,m.jsx)(a.ModalBody,{children:g?(0,m.jsx)(Y.au,{}):r?(0,m.jsx)(S,{}):w?(0,m.jsxs)(a.Flex,{column:!0,gap:4,children:[(0,m.jsxs)(a.Flex,{column:!0,gap:2,children:[(0,m.jsx)(a.TextBig,{children:"Select users to share the report."}),(0,m.jsxs)(a.TextBig,{children:["If you cannot find the users you want to share the report with,"," ",(0,m.jsx)(P.A,{Component:a.TextBig,onClick:I,children:"invite"})," ","them and try again, as soon as they join."]}),(0,m.jsx)(a.Flex,{column:!0,padding:[2,0],children:(0,m.jsx)(D.A,{maxLength:2e3,background:"mainBackground",placeholder:"Leave a message for the recipients (max 2000 characters)",value:u,onChange:M})})]}),(0,m.jsx)(a.Flex,{height:"300px",overflow:{vertical:"auto"},children:(0,m.jsx)(R,{members:A,setUserIds:l})})]}):(0,m.jsx)(F,{onStartInviting:I})}),r?null:(0,m.jsx)(a.ModalFooter,{children:(0,m.jsxs)(a.Flex,{alignItems:"center",gap:2,children:[(0,m.jsx)(oe,{flavour:"hollow",onClick:v,tooltip:"Copy report URL",textTransform:"",children:"Copy URL"}),(0,m.jsx)(oe,{flavour:"hollow",onClick:_,disabled:!E||g,tooltip:B,children:"Share"})]})})]})})};var ae=t(83864);const se=["reportId","onClick"];function re(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Ae(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?re(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):re(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const ce=(0,C.A)((0,M.A)((e=>{let{reportId:n,onClick:t}=e,o=(0,f.A)(e,se);const[a,,s,r]=(0,T.A)(),[A,,c,l]=(0,T.A)(),d=(0,i.useCallback)((()=>{"function"===typeof t&&t(),s()}),[t,s]);return(0,m.jsxs)(m.Fragment,{children:[(0,m.jsx)(u.D,Ae({onClick:d},o)),a?(0,m.jsx)(ie,{reportId:n,startIsInviting:c,closeModal:r}):null,A&&(0,m.jsx)(ae.d,{onClose:l,isSubmodal:!0})]})})));var le=t(42849),de=t(75250),ue=t(42660),he=(t(3296),t(27208),t(48408),t(64251)),me=t(63950),ge=t.n(me),pe=t(20354),fe=t.n(pe),ye=t(24716);const be=async function(e){let{onSuccess:n=ge(),onError:t=ge()}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const o="#F6F7F7",i=await fe()(e,{useCORS:!0,foreignObjectRendering:!0,width:e.scrollWidth,height:e.scrollHeight,scale:2,backgroundColor:o,ignoreElements:e=>e.hasAttribute("data-noprint"),onclone:e=>{e.querySelectorAll(".dygraph-axis-label").forEach((e=>{e.style="color:#000;"}))}}),a=i.toDataURL("image/png");if(!a||"data:,"===a)return he.wd("html2canvas failed: empty image data."),void t({message:"html2canvas failed: empty image data."});const s={width:i.width+48,height:i.height+24},r=new ye.Ay({orientation:"portrait",unit:"px",format:[s.width,s.height]});return r.setFillColor(o),r.rect(0,0,s.width,s.height,"F"),r.addImage(a,"PNG",24,12,i.width,i.height),n(),r};function Ee(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function we(e){for(var n=1;n<arguments.length;n++){var 
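/*
 * Editorial note: the async function `be` above is the PDF generator. It
 * rasterizes a DOM element with html2canvas (2x scale, CORS images allowed,
 * elements marked data-noprint skipped, dygraph axis labels recolored in the
 * off-screen clone) and places the resulting PNG into a jsPDF page sized to
 * the capture plus fixed margins. A de-minified sketch under the same
 * constants, with error handling omitted:
 */
import html2canvas from "html2canvas";
import { jsPDF } from "jspdf";

async function elementToPdf(element) {
  const background = "#F6F7F7";
  const canvas = await html2canvas(element, {
    useCORS: true,
    foreignObjectRendering: true,
    width: element.scrollWidth,
    height: element.scrollHeight,
    scale: 2, // render at double resolution for a crisper capture
    backgroundColor: background,
    ignoreElements: (el) => el.hasAttribute("data-noprint"),
    onclone: (clonedDoc) => {
      // Force readable axis labels in the clone only, as the bundle does.
      clonedDoc.querySelectorAll(".dygraph-axis-label").forEach((el) => {
        el.style = "color:#000;";
      });
    },
  });

  const image = canvas.toDataURL("image/png");

  // The page is the capture plus 48px of horizontal and 24px of vertical
  // padding, so the image is inset by (24, 12).
  const page = { width: canvas.width + 48, height: canvas.height + 24 };
  const pdf = new jsPDF({ orientation: "portrait", unit: "px", format: [page.width, page.height] });
  pdf.setFillColor(background);
  pdf.rect(0, 0, page.width, page.height, "F");
  pdf.addImage(image, "PNG", 24, 12, canvas.width, canvas.height);
  return pdf; // the caller decides between pdf.save(...) and previewing
}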
t=null!=arguments[n]?arguments[n]:{};n%2?Ee(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Ee(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Be={generator:be,fileName:"capture",debug:!1,onStart:ge(),onSuccess:ge(),onError:ge()},Ce=e=>{let{getElement:n,options:t={}}=e;const{generator:o,debug:a,fileName:s,onStart:r,onSuccess:A,onError:c,generatorProps:l={}}=we(we({},Be),t),[d,,u,h]=(0,T.A)(),[m,g]=(0,i.useState)();return[d,(0,i.useCallback)((async()=>{u(),r();const e="function"===typeof n?n():void 0;if(!e)return he.wd("No capture target found."),c({message:"No capture target found."}),void h();const t=e.getBoundingClientRect();if(0===t.width||0===t.height)return he.wd("Capture element has no size."),c({message:"Capture element has no size."}),void h();await document.fonts.ready,a&&document.querySelectorAll("img").forEach((e=>{const n=e.currentSrc||e.src;try{const e=new URL(n),t=window.location.origin&&"null"!==window.location.origin?window.location.origin:"";t&&e.origin!==t&&he.wd("External image may cause issues: "+n)}catch(t){he.wd("Image with invalid src: "+e),h()}}));try{(await o(e,we({onError:e=>{c(e),h()}},l))).save("".concat(s,".pdf")),h(),A()}catch(i){a&&console.log(i),he.Cp(i),h(),g(i),c(i)}}),[n,s,a,r,A,c,u,h,g]),m]},Me=["autoDownload"];function Te(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const Ie=(0,ue.g)(a.ModalBody,"light"),ve=e=>{let{onClick:n}=e;return(0,m.jsx)(a.Icon,{cursor:"pointer",name:"download",color:"textLite",onClick:n})},_e=e=>{let{isLoading:n}=e;const t=n?"Generating report...":"Your download will begin in a few seconds";return(0,m.jsx)(a.Flex,{alignItems:"center",justifyContent:"center",position:"absolute",top:0,left:0,right:0,bottom:0,zIndex:999,background:"rgba(0,0,0,0.5)",children:(0,m.jsx)(a.Flex,{padding:[4,6],background:"modalBackground",round:1,children:(0,m.jsx)(a.Text,{fontSize:"16px",children:t})})})},Qe=e=>{let{isDisabled:n,options:t={},renderAccessor:s,children:r}=e;const A=(0,i.useRef)(),c=(0,i.useRef)(),[l,d]=(0,i.useState)(),u=(0,$.J)(),{autoDownload:h}=t,g=(0,f.A)(t,Me),[p,y,b,E]=(0,T.A)(),[,w]=(0,V.A)(),B=function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Te(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Te(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({onSuccess:E},g),[C,M,I]=Ce({getElement:()=>null===c||void 0===c?void 0:c.current,options:B});return(0,i.useEffect)((()=>{let e=!0,t=null;return n||!e||!p||!h||C||null!==A&&void 0!==A&&A.current||(t=setTimeout((()=>{A.current=!0,M()}),2e3)),()=>{e=!1,A.current=!1,t&&(clearTimeout(t),t=null)}}),[n,p,h,C,M]),(0,i.useEffect)((()=>{I&&(d(I),w({message:I}))}),[I,d,w]),(0,m.jsxs)(m.Fragment,{children:[s?s({isLoading:C,onClick:y}):(0,m.jsx)(ve,{onClick:b}),p?(0,m.jsx)(a.Modal,{onClickOutside:E,onEsc:E,children:(0,m.jsxs)(a.ModalContent,{width:{max:u?"90vw":"1024px"},height:{max:"95vh"},children:[!h&&!C||l?null:(0,m.jsx)(_e,{isLoading:C}),(0,m.jsxs)(a.ModalHeader,{children:[(0,m.jsx)(a.H3,{children:"PDF 
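/*
 * Editorial note: the first useEffect above implements the preview modal's
 * auto-download behaviour. When the modal opens with autoDownload set, the
 * capture fires once after roughly two seconds, and the timer is cancelled if
 * the modal closes first. The same logic extracted as a hook; the names are
 * illustrative, not the bundle's real exports.
 */
import { useEffect, useRef } from "react";

function useAutoDownload({ enabled, isLoading, start }) {
  const startedRef = useRef(false);

  useEffect(() => {
    if (!enabled || isLoading || startedRef.current) return undefined;
    const timer = setTimeout(() => {
      startedRef.current = true; // guard against firing twice
      start();
    }, 2000);
    return () => {
      startedRef.current = false;
      clearTimeout(timer);
    };
  }, [enabled, isLoading, start]);
}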
Preview"}),(0,m.jsx)(a.ModalCloseButton,{testId:"close-button",onClose:E})]}),(0,m.jsx)(Ie,{background:"mainBackground",height:"100%",overflow:{vertical:"auto"},children:(0,m.jsx)("div",{ref:c,children:r})}),h?null:(0,m.jsx)(a.ModalFooter,{children:(0,m.jsx)(a.Button,{flavour:"hollow",onClick:M,disabled:C||n,children:"Download"})})]})}):null]})};var De=t(3066),xe=(t(27495),t(25440),t(42762),t(72577),t(25708));const ke=e=>{e&&e.API.events.push(["addFonts",function(){this.addFileToVFS("NotoSans-Regular.ttf","AAEAAAARAQAABAAQR0RFRkByP9MABSJEAAAHnEdQT1PjWEomAAUp4AAArYBHU1VCVhK4IgAF12AAAH0CT1MvMnfRmRwAAAGYAAAAYGNtYXDq6rMGAAAyeAAABwRjdnQgGa8axQAAQ0wAAAD+ZnBnbTYLFgwAADl8AAAHtGdhc3AAFgAjAAUiNAAAABBnbHlmFHM8QQAAdNAABKkAaGVhZAM2yywAAAEcAAAANmhoZWEOUgw4AAABVAAAACRobXR4S/Q09QAAAfgAADB+bG9jYRwsnwgAAERMAAAwhG1heHAOuAU7AAABeAAAACBuYW1lZ1qRGAAFHdAAAARCcG9zdP9pAGYABSIUAAAAIHByZXBmtKnnAABBMAAAAhoAAQAAAAEKPQBYtbFfDzz1AAsIAAAAAADPKrtZAAAAAM8qu1v7MPzaCpIIYgAAAAkAAgABAAAAAAABAAAIjf2oAAAKtvsw97oKkgABAAAAAAAAAAAAAAAAAAAMHwABAAAMIAFSAFQAhAALAAIAEAAXAFwAAAHJA0sAAwABAAMEkQGQAAUACAWaBTMAAAEfBZoFMwAAA9EAZgIACAICCwUCBAUEAgIE4ACC/0AAeP8AAAAhAAAAAE1PTk8AQAAA//0Ijf2oAAAIjQJYIAABn9/XAAAESgW2AAAAIAAEBM0AwQAAAAAEFAAAAhQAAAInAJMDRACFBSsAMwSTAH8GpgBkBdsAbQHNAIUCZgBSAmYAPQRoAFQEkwBmAiUAVAKTAFICJQCTAvoAFASTAGQEkwC2BJMAYgSTAFwEkwArBJMAgQSTAHEEkwBaBJMAZASTAGYCJQCTAiUAPwSTAGYEkwBzBJMAZgN5ABkHMQB3BR0AAAUzAMcFDgB9BdcAxwRzAMcEJwDHBdMAfQXuAMcCtgBSAi//YAT0AMcEMQDHB0IAxwYUAMcGPwB9BNcAxwY/AH0E+gDHBGQAaARzABQF2QC4BM0AAAdxABkEsAAIBIcAAASTAE4CogCkAvoAFAKiADMEkwBOA43//ASkAYMEfQBeBOwArgPXAHEE7ABxBIMAcQLBAB8E7ABxBPIArgIQAKACEP+PBEYArgIQAK4HewCuBPIArgTXAHEE7ACuBOwAcQNOAK4D1QBoAuMAIQTyAKIEEAAABkoAFwQ7ACUEFAACA8MAUAMKADkEaAHpAwoAQgSTAGYCFAAAAicAkwSTALoEkwBCBJMAeQSTAB0EaAHpBBsAeQSkATEGqABkAtsAQgQSAFIEkwBmApMAUgaoAGQEAP/6A20AewSTAGYCzQAxAs0AIwSkAYME/ACuBT0AcQIlAJMBzQAdAs0ATAMCAEIEEgBQBlIARwZSAC4GUgAgA3kAMwUdAAAFHQAABR0AAAUdAAAFHQAABR0AAAcM//4FDgB9BHMAxwRzAMcEcwDHBHMAxwK2ADwCtgBSArb//QK2ADwF1wA9BhQAxwY/AH0GPwB9Bj8AfQY/AH0GPwB9BJMAgwY/AH0F2QC4BdkAuAXZALgF2QC4BIcAAATXAMcFDACuBH0AXgR9AF4EfQBeBH0AXgR9AF4EfQBeBukAXgPXAHEEgwBxBIMAcQSDAHEEgwBxAhD/1AIQAKcCEP+vAhD/6gTXAHEE8gCuBNcAcQTXAHEE1wBxBNcAcQTXAHEEkwBmBNcAcQTyAKIE8gCiBPIAogTyAKIEFAACBOwArgQUAAIFHQAABH0AXgUdAAAEfQBeBR0AAAR9AF4FDgB9A9cAcQUOAH0D1wBxBQ4AfQPXAHEFDgB9A9cAcQXXAMcE7ABxBdcAPQTwAHEEcwDHBIMAcQRzAMcEgwBxBHMAxwSDAHEEcwDHBIMAcQRzAMcEgwBxBdMAfQTsAHEF0wB9BOwAcQXTAH0E7ABxBdMAfQTsAHEF7gDHBPIArgXuAAAE8gASArb/5AIQ/5ICtgAsAhD/3AK2AB4CEP/KArYAUgIQADMCtgBSAhAArgTlAFIEHQCgAi//YAIQ/48E9ADHBEYArgRGAK4EMQDHAhAApQQxAMcCEABcBDEAxwIQAK4EMQDHApoArgQxABsCEP/uBhQAxwTyAK4GFADHBPIArgYUAMcE8gCuBYcAAwYUAMcE8gCuBj8AfQTXAHEGPwB9BNcAcQY/AH0E1wBxB20AfQeRAG8E+gDHA04ArgT6AMcDTgBiBPoAxwNOAH4EZABoA9UAaARkAGgD1QBoBGQAaAPVAGgEZABoA9UAaARzABQC4wAhBHMAFALjACEEcwAUAuMAIQXZALgE8gCiBdkAuATyAKIF2QC4BPIAogXZALgE8gCiBdkAuATyAKIF2QC4BPIAogdxABkGSgAXBIcAAAQUAAIEhwAABJMATgPDAFAEkwBOA8MAUASTAE4DwwBQApwArgSTAL4FHwAABH0AXgcM//4G6QBeBj8AfQTXAHEEZABoA9UAaAS+AQQEvgEEBLYBKwS+AR8CEACgBJ4BbQGWACMEvgECBKAA3wSeAfgEngEQBR0AAAIlAJME8P/QBoH/0AOk/94Gg//iBZP/zgaD/+ICxf/mBR0AAAUzAMcEMQDHBKYAKQRzAMcEkwBOBe4AxwY/AHsCtgBSBPQAxwTdAAAHQgDHBhQAxwRxAEgGPwB9BdkAxwTXAMcEjwBKBHMAFASHAAAGbwBoBLAACAZvAG0GQgBOArYAPASHAAAE7ABxA90AWATyAK4CxQCoBOcAogTsAHEFDACuBCUACgTVAG8D3QBYA98AcQTyAK4EwwBxAsUAqARGAK4EVv/0BPwArgRgAAAD0QBvBNcAcQVGABkE1wCiA98AcQTwAHED1QAUBOcAogXNAHEEaP/pBhcAogZCAHMCxf/qBOcAogTXAHEE5wCiBkIAcwRzAMcF7gAUBDEAxwUlAH0EZABoArYAUgK2ADwCL/9gB30AAgeqAMcF7gAUBPIAxwT+ABcF2QDHBR0AAATsAMcFMwDHBDEAxwWHAAwEcwDHBtUAAgS0AE4GJQDJBiUAyQTyAMcFqgACB0IAxwXuAMcGPwB9BdkAxwTXAMcFDgB9BHM
/* [base64-encoded NotoSans-Regular.ttf data elided: the addFileToVFS payload above continues for many more lines of font data, and this capture of the chunk is truncated mid-string] */
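/*
 * Editorial note: the `ke` helper above registers the embedded NotoSans TTF
 * with jsPDF by pushing an "addFonts" handler onto jsPDF.API.events, so every
 * new document loads the font into its virtual file system. Because the
 * capture cuts off inside the base64 payload, the closing addFont call is not
 * visible; the usual pairing is sketched below, with NOTO_SANS_REGULAR_BASE64
 * standing in for the elided data.
 */
import { jsPDF } from "jspdf";

const NOTO_SANS_REGULAR_BASE64 = "<base64-encoded NotoSans-Regular.ttf>"; // placeholder for the elided payload

jsPDF.API.events.push([
  "addFonts",
  function () {
    this.addFileToVFS("NotoSans-Regular.ttf", NOTO_SANS_REGULAR_BASE64);
    this.addFont("NotoSans-Regular.ttf", "NotoSans", "normal"); // assumed follow-up, not visible in the truncated chunk
  },
]);

// Usage: documents created afterwards can select the registered face.
const doc = new jsPDF();
doc.setFont("NotoSans", "normal");
doc.text("Sample text in NotoSans", 10, 10);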
kQGY/miT/mgBmJMDG5L+WgGmkgGoAAEAVP74AYkA7gAGAB5AEQQBBwgELwY/Bq8GvwbPBgUGAC9dxhESATk5MTAlFwYDIzY3AXsON3mFQSXuF9b+9/r8AAAAAQBSAdUCQgJ1AAMAKEAZAAMEBQC1AQGKAQEvAV8BvwHPAe8B/wEGAQAvXV1dMxESATk5MTATNSEVUgHwAdWgoAAAAAABAJP/4wGRAPgACwAWQAoABgwNCQN9WQkTAD8rERIBOTkxMDc0NjMyFhUUBiMiJpNBPD1ERD07Qm9CR0dCQUtKAAAAAAEAFAAAAuUFtgADABO3AgAEBQMDAhIAPz8REgE5OTEwAQEjAQLl/eCxAiEFtvpKBbYAAAACAGT/7AQvBc0ACwAXAChAFAwGABIGEhgZCRVzWQkHAw9zWQMZAD8rABg/KxESATk5ETMRMzEwARACIyICERASMzISARASMzISERACIyICBC/x9u727ffu+fzqkZ6gkZGgoI8C3f6D/owBfwFyAX4Bcv6A/pD+wv7mASEBNwE0ASL+5AABALYAAALXBbYACwAkQBAABAEJAQwNBAoICAEKBgEYAD8/EjkvEjkREgE5OREzMzEwISMRNDcGBwYHJwEzAtewCBwgZnleAYyVA/6IdB4aVWN5ATMAAAAAAQBiAAAEKQXLABoANEAbABMOARMHBxkBAxscEApzWRAHAgEZARl1WQEYAD8rERIAORg/KxESARc5ETMRMxEzMTAhITUBPgI1NCYjIgYHJzYzMhYVFAYGBwEVIQQp/DkBf61wN4p4WqFlYM3zzO9GjKX+zwLjlgGDr5aMUXSCPE95rNG0YLO5of7TCAABAFz/7AQfBcsAJwBfQDQDBAQbIg0AGwcTExsWDQQoKQMWFxcWc1kJFwE6FwEDDxcBDwYXFwolJR5zWSUHChFzWQoZAD8rABg/KxESADkYL19eXV9dcSsREgA5ERIBFzkRMxEzETMRMxEzMTABFAYHFRYWFRQEISImJzUWFjMgERAhIzUzMjY1NCYjIgYHJzY2MzIWA/Kdkq+t/t7+8XTDW13UYwFy/meOkKzCjX1hpWtaWuuE1u8EYIy0HggWtJDR4yItqC4yASEBApmTgml2NUZ7R1HDAAACACsAAARqBb4ACgATAEBAIBMFCQICCw4DAAMFAxQVDwMHAQUTBXVZBgkTEwMHBgMYAD8/EjkvMzMrEQAzERI5ERIBFzkRMzMzETMRMzEwASMRIxEhNQEzETMhETQ3IwYGBwEEatSv/UQCsLvU/n0LCQs3Fv5KAUz+tAFMmQPZ/DABzIm7Gmge/ZAAAAEAgf/sBB8FtgAaAD1AHxcPCBkUAw8UDxscABFzWQAABhUVGHVZFQYGDHNZBhkAPysAGD8rERIAORgvKxESATk5ETMRMzMRMzEwATIEFRQAISInNRYWMzI2NRAhIgcnEyEVIQM2AjPlAQf+3v7/9YZK0GOsvP6SdoRaNwLd/b0jcwOB5sjk/v1Pqiw0pJgBKB45ArKk/lgXAAAAAAIAcf/sBDMFywAWACQAUkAsBRoKISEAEBoAGiUmChMNDR12WQ8NHw1fDQMQAw0NEwICB3ZZAgcTF3NZExkAPysAGD8rERIAORgvX15dKxESADkREgE5OREzETMRMxEzMTATECEyFxUmIyICAzM2MzIWFRQCIyImAgEyNjU0JiMiBgYVFBYWcQKddEFMZen3DQxu7Mbi+teW4XoB74uWkolXkVdQjgJxA1oTmRj+5P7MrPDM4/74mAEl/tqyopKdSIBEZK9kAAAAAQBaAAAELwW2AAYAJ0ATBQEBAAIDBwgAGAUCAwMCdVkDBgA/KxESADkYPxESARc5ETMxMCEBITUhFQEBFwJY/OsD1f2uBRKki/rVAAMAZP/sBC0FywAWACIALwBMQCcGLREmFw8mFAMtCR0dLRQPBDAxEQYgICoqDAAAI3ZZAAcMGnZZDBkAPysAGD8rERIAOREzEjk5ERIBFzkRMxEzETMRMxEzETMxMAEyFhUUBgcWFhUUBCMiJDU0JSYmNTQ2AxQWMzI2NTQmJwYGASIGFRQWFhc2NjU0JgJIyO6FkrGV/v7d6v8AAS+IeO9upZGPpJy4kYQBMnaMNmZwiXOOBcu6pG6uS1a7erbZzL36jU60cZ+9+6h0hId3YJZDPpgDXXBjP2BMLzmIWWRvAAAAAAIAZv/sBCkFywAXACUAUEArBBsbEQAiCxELJicMFA4OHnZZAA4QDlAOAxADDg4CFBQYc1kUBwIHdlkCGQA/KwAYPysREgA5GC9fXl0rERIAORESATk5ETMzETMRMzEwARAhIic1FjMyNjY3IwYjIiY1NBIzMhYSASIGFRQWMzI2NjU0JiYEKf1gdERNZ6PUbwgNcOzB4/7VluF5/hCImI+JWpRUT44DRvymFJobg/vQquvP4gEMmf7bASayopKbSXxFZK5lAAAAAgCT/+MBkQRmAAsAFwAoQBQMABIGAAYYGQ8VfVkPEAkDfVkJEwA/KwAYPysREgE5OREzETMxMDc0NjMyFhUUBiMiJhE0NjMyFhUUBiMiJpNBPD1ERD07Qj8+P0JEPTtCb0JHR0JBS0oDrkJJSENAS0oAAAACAD/++AGNBGYABwASAC9AHA0BCAQEExQEDwcfB48HnwevBwULAwcLEH1ZCxAAPysAGC9fXl3GERIBFzkxMCUXBgMjNhI3AzQ2MzIVFAYjIiYBaA80fIgbQQ0ZPz6BRD07Qu4Xx/7oaAEyXALtQkmLQklKAAEAZgDuBCsE3QAGADlAIQUBBAABAAcIwAABADADcAOwAwMDAgEAAwQvBV8FjwUDBQAZL10XMxgvXS9dERIBOTkRMxEzMTAlATUBFQEBBCv8OwPF/PwDBO4BqGYB4aD+lP68AAACAHMBvAQdA+UAAwAHADZAIAAEAwcEBwgJBU8EAQAEAQTHAAEADwEvAa8BzwHvAQUBAC9dM13GXV0yERIBOTkRMxEzMTATNSEVATUhFXMDqvxWA6oDVJGR/miSkgAAAAABAGYA7gQrBN0ABgA7QCMCBgUBBgEHCIAGwAYCBjADcAOwAwMDBAUDBgQvAV8BjwEDAQAZL10XMxgvXS9dERIBOTkRMxEzMTATAQE1ARUBZgME/PwDxfw7AY0BQgFuoP4fZv5YAAAAAAIAGf/jA0QFywAcACcAREAmHAAUCAgiAB0PBSgpUABgAAIPAAELAwAAJRElH31ZJRMRC35ZEQQAPysAGD8rERIAORgvX15dXRESARc5ETMRMzEwATU0Njc+AjU0JiMiBgcnNjMyFhUUBgYHBgYVFQM0MzIWFRQGIyImAR9MYG1EHIB4UJlfP77VvtooUXljQb5/OkNEOThHAZw3d5VQXFBQNWNqKC6PZb2qSnJmZVVvWyL+04lFRENJQgAAAgB3/0oGugW2ADYAQABLQCUqFxUkLzcOFTwAHBw8Di8EQUIIMz4SEjMZOQQ5CwssIDMDJywlAD8zPzMSOS8zMxEzETkvMxE5ERIBFzkRMxEzETMRMxEzMzEwARQ
kBEgcDBANpWQQDAD8rEQAzGD8REgEXOREzMTAhIxEhNSEVIQKWuf43BEj+OgUUoqIAAQC4/+wFHwW2ABEAJUARCgcBEAcQEhMRCAMEDWlZBBMAPysAGD8zERIBOTkRMxEzMTABERQAISAANREzERQWMzI2NREFH/7S/vT+9/7cub/AtcMFtvxO+v7iASH7A678TLPEwrcDsgABAAAAAATNBbYACgAaQAsIDAALBAoHAAMKEgA/PzIROREBMxEzMTARMwEWFzY3ATMBI8EBST8dGUQBR8P99rkFtvxWso1+xQOm+koAAQAZAAAHVgW2ABgAIkAQCRgZGg0UBAMIFxAJAwEIEgA/Mz8zMxIXORESATk5MTAhIwEmJwYHASMBMxMWFzY3ATMBFhc2NxMzBdG6/uM/CxA2/uy6/n3A4y4YFjgBAr4BDTQcEDfiwAO+1ktztPxIBbb8g6+tpMMDcvyHrbOK1AN7AAAAAQAIAAAEqAW2AAsALkAXBgQIAgoAAAsCBQQFDA0IAgQJBgMBBBIAPzM/MxI5ORESARc5ETMRMxEzMTAhIwEBIwEBMwEBMwEEqNH+ff53wwHm/jnNAWYBacL+PAJ7/YUC+gK8/cMCPf1IAAAAAQAAAAAEhwW2AAgAIkAPAgoHBAUFCQoABQEHAwUSAD8/MxI5ERIBOREzMhEzMTABATMBESMRATMCRAF9xv4Zuf4ZyQLnAs/8gf3JAi8DhwABAE4AAAREBbYACQA4QB0EAQcAAAMIAQQKCwcEBQUEaVkFAwIIAQEIaVkBEgA/KxESADkYPysREgA5ERIBFzkRMxEzMTAhITUBITUhFQEhBET8CgMC/RYDyfz+AxeLBIeki/t5AAAAAAEApP68Am8FtgAHACBADgYBBAABAAgJBQIDBgEkAD8zPzMREgE5OREzETMxMAEhESEVIREhAm/+NQHL/uUBG/68BvqT+i0AAAEAFAAAAucFtgADABO3AwEEBQMDAhIAPz8REgE5OTEwEwEjAcUCIrL93wW2+koFtgAAAAABADP+vAH+BbYABwAgQA4DBwYBBwEICQMEAwAHJAA/Mz8zERIBOTkRMxEzMTAXIREhNSERITMBG/7lAcv+NbAF05P5BgAAAAABAE4CIwRGBcEABgAXQAkAAwcIBQQAAQMAP80yORESATk5MTATATMBIwEBTgGyZgHgoP6P/rkCIwOe/GIC3/0hAAAAAAH//P7FA5H/SAADABG1AAUBBAECAC8zEQEzETMxMAEhNSEDkfxrA5X+xYMAAQGDBNkDHwYhAAkAIkASBAAKC28FAQWAoAEBDwFfAQIBAC9dXRrNXRESATk5MTABIyYmJzUzFhYXAx95S7Ml1yB2LwTZPL84FUK3NgAAAAACAF7/7APXBFwAGgAlAFVALxMjIwgLHhoBAR4IAyYnAgAWCx9gWQ8LHwt/CwMdAwsLFgAVFg9eWRYQBRteWQUWAD8rABg/KwAYPxI5L19eXSsREgA5ERIBFzkRMxEzETMRMzEwIScjBgYjIiY1ECU3NTQmIyIGByc2NjMyFhURJTI2NTUHBgYVFBYDVCMIUqN8orgCD7psd1ebRDdTxGDHwv4Kl62iva1pnGdJqpsBThAHQX13NCCHLDKwwP0UfaOWYwcHanJWXAAAAAACAK7/7AR7BhQAFAAhADxAHhIKCg0DHw0fIiMJEgYADgANFQAVXVkAEAYcXVkGFgA/KwAYPysAGD8/ERI5ORESATk5ETMRMxEzMTABMhIREAIjIiYnIwYHIxEzERQHMzYXIgYVFRQWMzI2NTQmArbZ7PDVb643Dh8GgbQKCm/HppCTp5SRkgRc/tX+9P7w/tdQT3gTBhT+hnFxpJW84AjhwdnN0NAAAAABAHH/7AOTBF4AFgAoQBQPAwkVAxUXGAYMYVkGEAASYVkAFgA/KwAYPysREgE5OREzETMxMAUiABEQADMyFhcHJiMiBhUUFjMyNxUGAmbt/vgBC/dQnTM3i2Kmnp6bkYxyFAEjARABFAErIRqWNNHPx9NAoDsAAAACAHH/7AQ9BhQAEwAgAEBAIR4DDBcPCRERFwMDISISCAAGDQAQFQYbXVkGEAAUXVkAFgA/KwAYPysAGD8/ERI5ORESARc5ETMzETMRMzEwBSICERASMzIXMyYmNREzESMnIwYnMjY1NTQmIyIGFRQWAjPW7O3X3XcNAwq0kRsIc8akl5mki5iXFAEoAQ8BDQEuohR5FQG2+eyTp5WzzCHlw93NzNIAAAACAHH/7AQbBF4AFAAbAEtAKBIKGAsDChkDGRwdGAteWRkYAQMPGAEQBhgYAAYGFV1ZBhAADmFZABYAPysAGD8rERIAORgvX15dX10rERIBOTkRMxEzMxEzMTAFIgAREAAzMhIVFSEWFjMyNjcVBgYDIgYHISYmAoH3/ucBBt/P9v0QBbSlWJ5qW6CagZYOAi8CihQBKwEGAQgBOf715G27wh8tnicgA9+mlJqgAAAAAAEAHwAAAxkGHwAVADtAHg0XFAICBwMAAwUDFhcDFQsQXVkLAQEFBwVeWRQHDwA/MysRADMYPysAGD8REgEXOREzMxEzETMxMAEhESMRIzU3NTQ2MzIXByYjIgYVFSECqP7rtMDAr7ZpbDBdRltYARUDvvxCA75UPj/IyCWNHniCRwAAAAIAcf4UBD0EXgAMACgASUAmIgoUAygdGg4OKBQDKSoPGREXGw8XB11ZFxARAF1ZERYgJV1ZIBsAPysAGD8rABg/KwAYPxESOTkREgEXOREzMxEzETMzMTAlMjY1NTQmIyIGFRQWBTcjBiMiAhEQEjMyFzM3MxEUBiMiJzUWMzI2NQJQppeYqYqXkwHNBghv5dXv8dHfeQsYj+/88Jug9Yyjf7PGK9zI28vM1nWHpQEpAQ4BCQEyppL7pOzuRqZWpJEAAAABAK4AAARMBhQAFgA0QBkODAgICRYACQAXGA4JEgoAAAkVEgRdWRIQAD8rABg/Mz8REjkREgE5OREzETMRMzMxMCERNCYjIgYVESMRMxEUBzM2NjMyFhURA5p3f6ebtLQKDDG0ccjKAr6Gg7rW/ckGFP44WkBQWr/S/TUAAgCgAAABcwXlAAMADwApQBcAAQoBBAMQEQcNY1mQBwE/BwEHAg8BFQA/P8RdXSsREgEXOREzMTAhIxEzAzQ2MzIWFRQGIyImAWK0tMI9LSo/PyotPQRKASk8NjY8Ozg4AAAAAAL/j/4UAXMF5QANABkANkAeAgsIFAgOAxobERdjWWARAQ8RAQwDEQkPAAVdWQAbAD8rABg/xF9eXV0rERIBFzkRMzIxMBMiJzUWMzI2NREzERQGAzQ2MzIWFRQGIyImLV5ARUNOSbSdJT0tKj8/Ki09/hQZkRRVVwT0+xKkpAdfPDY2PDs4OAABAK4AAAQzBhQADwA4QB0PDgoKCwUIBgQICwQQEQ8IBQMJCQsDDAADDwcLFQA/Mz8/ERI5ERczERIBFzkRMxEzETMzMTABNzcBMwEBIwEHESMRMxEHAWA9RgFf0v5EAdvZ/oN9s
rIIAjVOVAFz/iv9iwIAbf5tBhT807IAAQCuAAABYgYUAAMAFkAJAAEBBAUCAAEVAD8/ERIBOREzMTAhIxEzAWK0tAYUAAAAAQCuAAAG1QReACIAQkAhFBAQEQcIIgAACBEDIyQaFREYEg8ACBEVAwwYDF1ZHhgQAD8zKxEAMxg/MzM/ERI5ORESARc5ETMRMxEzETMxMCERECMiBhURIxE0JiMiBhURIxEzFzM2NjMgFzM2NjMyFhURBiPfmZCzbXSYjbSRGwovq2oBAk4KNbd0urkCwwEEsrf9ogLDgoK61P3HBEqWUFq4WGDA0/01AAAAAAEArgAABEwEXgAUADJAGAwICAkUAAkAFRYMCRAKDwAJFRAEXVkQEAA/KwAYPzM/ERI5ERIBOTkRMxEzETMxMCERNCYjIgYVESMRMxczNjYzMhYVEQOad3+pmbSRGwozuG/KxAK+hoO70/3HBEqWUVnEz/01AAAAAAIAcf/sBGgEXgAMABcAKEAUDQcAEwcTGBkKFl1ZChADEF1ZAxYAPysAGD8rERIBOTkRMxEzMTABEAAjIiYCNRAAMzIAARQWMzI2NTQmIyAEaP7w8JXmfAEM8ugBEfzDo5+dpKWf/sECJ/7z/tKLAQSsAQwBK/7P/vrP19fPz9EAAgCu/hQEewReABQAIQBAQCEZCwMDBwcIEh8IHyIjAgwADwkPCBsPFV1ZDxAAHF1ZABYAPysAGD8rABg/PxESOTkREgE5OREzETMRFzMxMAUiJyMXFhURIxEzFzM2NjMyEhEQAgMiBgcVFBYzMjY1NCYCtt13DAQItJQYCECobtbt7vWjkQKUpoqbmxSfKU49/j0GNpZaUP7X/vL+8/7SA9u4xSPfx+DIydUAAAIAcf4UBD0EXgAMAB8APkAgChAZHRYDAxoQGiAhHhUNExcPGhsTB11ZExANAF1ZDRYAPysAGD8rABg/PxESOTkREgE5OREXMzMRMzEwJTI2NzU0JiMiBhUUFhciAhEQEjMyFzM3MxEjETQ3IwYCUqGUBJiljZaVb9Tq79XhdQgbj7QKDHOBsMsl48XezMnVlQEsAQsBDAEvqpb5ygHVbjynAAAAAQCuAAADLwReABEALEAVDgoKCwsCEhMOCwAMDwsVAAViWQAQAD8rABg/PxESORESATk5ETMRMzEwATIXByYjIgYGFREjETMXMzY2Aq5JOBY9OleVVLSUFAg/rAReDKYOYKln/bYESsltcAAAAQBo/+wDeQReACMAOkAeFwAFHRIACxILJCUSHQsABAMVFRpeWRUQAwheWQMWAD8rABg/KxESABc5ERIBOTkRMxEzMxEzMTABFAYjIic1FjMyNjU0JicuAjU0NjMyFwcmIyIGFRQWFx4CA3nm0NmAtaiIfHeYm3473MC7oz2nhnB0ZLeJgz4BL5qpRaRYWEpBWjo8VWpMh5xKj0ZHPjxPRjNYbgABACH/7AK2BUYAFgBAQB8LCRAUFAkSBAkEFxgOEEAKEw0QEBNkWRAPBwBdWQcWAD8rABg/KxEAMxEzGhgQzRESATk5ETMRMxEzETMxMCUyNjcVBgYjIBERIzU3NzMVIRUhERQWAh0jXhgZaTb+vpudSGsBPf7DW38OCYoLFQFTAn9WSOr8jP2GX2YAAAEAov/sBEQESgAUAC5AFgETCgcMEwwVFg0QCBQPCxUQBF1ZEBYAPysAGD8/MxI5ERIBOTkRMzMRMzEwAREUFjMyNjURMxEjJyMGBiMiJjURAVh3famatZQaCTG0d8bJBEr9PYWBvNECPPu2kU9WvtECzwAAAQAAAAAEEARKAA0AGEAKDA8BDgULAQ8AFQA/PzM5EQEzETMxMCEBMxMWFzM+AjcTMwEBoP5gwelFEwgDCQxE6sD+XwRK/XnDYA0hJ84Ch/u2AAAAAQAXAAAGMwRKAB8AIkAQCR4gIQ0DGQMIHRIJDwAIFQA/Mz8zMxIXORESATk5MTAhAyYnIwYHAyMBMxISFzM3NjcTMxMeAxczNjcTMwEEL7waMggqIMXM/tO6aG0KCA4fHcPEvQoXFBAECQlAmrj+zwJqTdbDYv2YBEr+a/5aVz6PWgJr/ZUjT01JHUz6Akr7tgAAAAEAJQAABBcESgALAC5AFwELAwkFBwcGCQALBQwNCQMLBAEPCAsVAD8zPzMSOTkREgEXOREzETMRMzEwAQEzAQEzAQEjAQEjAbL+hc0BGwEYy/6FAZDN/tX+0csCMQIZ/mIBnv3n/c8Btv5KAAEAAv4UBBQESgAXACxAFQoZABAQFxgZBAAXFQkADw4TXVkOGwA/KwAYPzM/EjkREgE5OREzETMxMBMzExYXMzY2NxMzAQYGIyInNRYzMjY3NwLB7UsRCAlAFt/C/idFvoxLSjJGVngmOQRK/Y/MXyXLPQJv+x62nhGPDF9jkgAAAQBQAAADcwRKAAkAOEAdAAcHAwMIBAEECgsHBAUFBGRZBQ8CCAEBCGRZARUAPysREgA5GD8rERIAORESARc5ETMRMzEwISE1ASE1IRUBIQNz/N0CTv3VAvH9uwJUdwNHjIf8yAAAAAABADn+vALJBbYAHgBeQDgODw8eDBMTAxoIFxcaHgMfIA4eHtwAAbsAAaoAAYkAAXgAAR8ALwACLwDvAP8AAwAAFwgHAxYXJAA/Mz8zEjkvXXFdXV1dXTMSORESARc5ETMRMzMRMxEzETMxMBM2NjURNDYzFQYGFREUBxUWFhURFBYXFSYmNRE0JiM5hHbXv3Rw33Nsdm7IzoF5AoUCXWABL5uokwReX/7Z0icNFHxq/tNkWAKUAqicAS1oWQAAAAABAen+EgJ/BhQAAwAWQAkCAwMEBQAAAxsAPz8REgE5ETMxMAEzESMB6ZaWBhT3/gABAEL+vALRBbYAHABcQDcUBxkEBBAKAA4NDQoHAx0eDgAA3BwBuxwBqhwBiRwBeBwBHxwvHAIvHO8c/xwDHBwHFBUDCAckAD8zPzMSOS9dcV1dXV1dMxI5ERIBFzkRMzMRMzMRMxEzMTABBgYVERQGIzU2NRE0NzUmNRE0Jic1FhYVERQWMwLReoDVwOPf33Ztxs+AegHwAlZn/s+aqpQEvAEp0ycMJ9MBK2RZApMCpZ7+1WlYAAAAAQBmAkwEKwNYABcAKkAXAxAYGQ8ABkAQE0gGgAMMPxJvEq8SAxIAL10zMxrNKzIyERIBOTkxMAEiBgc1NjMyFhcWFjMyNjcVBiMiJicmJgFQNX82ZJJGd1FJXi42gDZmkEh+SEleAsVDNqBsHyIfGUA5nm4hICAYAAAAAAIAk/6LAZEEXgADAA8AOEAeCgMEAgMCEBFfAW8BAgABEAECCwMBAQ0DDQd9WQ0QAD8rABgvEjkvX15dXRESATk5ETMRMzEwEzMTIxMUBiMiJjU0NjMyFtV3M93vQT4+QUQ7O0QCpPvnBUhDRkVEQklIAAAAAQC6/+wD4QXLABsAVUAiFQgNAwMKBA8AAAQIAxwdAhh0WQUCDRJ0WQpwDYANkA0DDbj/wEAMGh9IDQINAgsEGQsHAD8/Ejk5Ly8r
XTMrEQAzKxESARc5ETMRMzMRMxEzMTAlBgcVIzUmAjUQJTUzFRYXByYjIgYVFBYzMjY3A89xjYnMwgGOi5h2NY9lp56gnVmHP/A5BcbMHwEV+QH8PqykBjWWNdDU1cIjGgAAAAABAEIAAARIBckAHQBaQDAQFhgTCQ0NGhYSAgsWEwUeHxQQEwwYGRh3WQkPGQEUAxkZEwAABXNZAAcTEHVZExgAPysAGD8rERIAORgvX15dMysRADMREjkREgEXOREzMxEzETMRMzEwATIXByYjIgYVESEVIRUUBgchFSE1NjU1IzUzETQ2Aqy+rUCngHV6AaH+X0FLAxP7+srExOEFyVSQTnmH/uSI1WCJLaSYL/HXiAEvtc4AAAAAAgB5AQYEFwSgABsAJwBbQDELEREOBRcXHAIaGhwDGRkcAAwQEAkTDiIiEwADKCkJDBATBQIaFwgfUBUBFSWvBwEHAC9dM8RdMhc5ERIBFzkRMxEzMxEzETMzETMRMxEzETMRMxEzETMxMBM0Nyc3FzYzMhc3FwcWFRQHFwcnBiMiJwcnNyY3FBYzMjY1NCYjIga4SIdkh2SCeWiJY4RISIFgiWd6hGKHYoVIiplvb5ubb26aAtN1bIthg0dHg2GJb3SCY4hgg0VHg2CIbHdvmZhwcpqbAAAAAQAdAAAEcwW2ABYAcUBDEg4HCwsQDAMABQkCCQAMFA4VBxcYCg4PDndZB28P3w8C/w8BAA8QDwIJAw8GEhMSd1kDAA8DDxMfEwITEwwBFQYMGAA/PzMSOS9dFzMrEQAzGC9fXl1dcTMrEQAzERIBFzkRMxEzETMzETMRMzEwAQEzATMVIRUhFSERIxEhNSE1ITUzATMCSAFuvf5k/v7LATX+y7D+ygE2/sr6/mm+AuwCyv0Ag6iD/vgBCIOogwMAAAAAAAIB6f4SAn8GFAADAAcAKEASAgYGAwcHCAkDAwcEBAcAAAcbAD8/ETkvETkvERIBOREzMxEzMTABMxEjETMRIwHplpaWlgYU/PT+F/zzAAAAAgB5//YDkwYfAC4AOgBVQC0MHRs1Ay8iLwATBh0pGTU1KQYABDs8FjgsMwM4MxsECSAJEGxZCQEgJm1ZIBIAPysAGD8rERIAFzkRMxEzERIBFzkRMxEzETMRMzMRMxEzETMxMBM0NjcmJjU0NjMyFhcHJiYjIgYVFBYXFhYVFAcWFRQGIyInNRYWMzI2NTQmJyYmNxQWFhc2NTQmJwYGiVZMSlLQxFyXazdhiEpzbXSbtpeZl+zS0ohXvk2AimqexY+aNnmjg4y2QVIDKVeHJChwVHuNHCqJJxw7PThUN0OZbLRcUJGOm0OaJy1KRz1PPUmWhTNLRj5Mb1FtOhJjAAIBMQUMA3EF1wALABcAMEAaBgASDAAMGBkPAwMVzwkBAAkgCQIwCYAJAgkAL11xXTMzETMREgE5OREzETMxMAE0NjMyFhUUBiMiJiU0NjMyFhUUBiMiJgExOCgnOjonKDgBgTgmJzo6JyY4BXM1Ly81NTIyNTUvLzU1MjIAAAAAAwBk/+wGRAXLABYAJgA2AExALicXAw8fLy8UCQ8XBTc4AAASEBJwEoASBBISGwYPDB8MfwyPDAQMDCMzGwQrIxMAPzM/MxI5L10zETkvXTMREgEXOREzETMRMzEwASIGFRQWMzI3FQYGIyImNTQ2MzIXByYBNBIkMzIEEhUUAgQjIiQCNxQSBDMyJBI1NAIkIyIEAgN9d4d1h194PGJBwdPevoJ6PGr8k8gBXsrIAV7Kwv6i0M/+osNtrAErrKwBKq2s/tWsrP7WrQQfq5mdqC+DGxfw29H4Pn02/rzIAV7KyP6iysX+ptDPAVrGrP7WrawBK6ysASqtrP7VAAAAAAIAQgMQAncFxwAYACEAPUAgEhkGCh0YAQEdBgMiIwEDHgoKFAAbAAMQA0ADAwMOFB8APzPUXTLEEjkvMxE5ERIBFzkRMxEzETMzMTABJwYjIiY1NDY3NzU0JiMiByYnNjMyFhURJRQzMjU1BwYGAhcZXoxhcZ6lc05EZGoaFHqGhof+Tm7FYm9iAx1WY2RnZ2oGBC09PDU+Jjxuev4+vmKyLwQEOQAAAAIAUgBzA8MDxQAGAA0AM0AbAwYKDQIECQsLBA0GBA4PDAUIIAEBEAEwAQIBAC9dcTMvMxESARc5ETMRMxEzETMxMBMBFwEBBwElARcBAQcBUgFYgf7hAR+B/qgBlQFdf/7hAR9//qMCKQGcSv6i/qFLAZsbAZxK/qL+oUsBmwAAAQBmAQYEKQMbAAUAKkAZAQIEAgYHAgIELwVfBX8FrwXPBe8F/wUHBQAvXTMzLxESATk5ETMxMAERIxEhNQQpkfzOAxv96wGDkgAA//8AUgHVAkICdQIGABAAAAAEAGT/7AZEBcsACAAWACYANgBpQD0NCQwEJxcAERESCQQfLy8EEhcENzgQDwABAAATDhIPEh8SfxKPEgQIEwATEBNwE4ATBBITEhMjMxsEKyMTAD8zPzMSOTkvL10RM10RMxI5L3EzERIBFzkRMxEzETMRMxEzETMRMzEwATMyNjU0JiMjBRQGBxMjAyMRIxEhMhYBNBIkMzIEEhUUAgQjIiQCNxQSBDMyJBI1NAIkIyIEAgLXZlFZUlpkAa5WSu6wzX+cAQeom/vfyAFeysgBXsrC/qLQz/6iw22sASusrAEqraz+1ays/tatAvxQQUlBhlN5Hf5zAWL+ngN/g/7EyAFeysj+osrF/qbQzwFaxqz+1q2sASusrAEqraz+1QAB//oGFAQGBpwAAwAuQB4ABQEEARsCARsCKwI7AmsCewLLAtsC6wIIDwIBAgIAL19dXXEzEQEzETMxMAEhNSEEBvv0BAwGFIgAAAAAAgB7A1YC8gXLAAsAFwAfQA0MAAYSABIYGQ8JFQMHAD8zxDIREgE5OREzETMxMBM0NjMyFhUUBiMiJjcUFjMyNjU0JiMiBnu4g4W3uISCuXtzT1FublFQcgSPh7W4hIO2s4ZPcXJOUHFwAAAAAgBmAAAEKQTJAAsADwBEQCgHDAAEBAkFAg8PBQwDEBENDAMHBwAgCAEvCF8IfwivCM8I7wj/CAcIAC9dcTMzETMvMxESARc5ETMRMzMRMxEzMTABIRUhESMRITUhETMBNSEVApEBmP5ok/5oAZiT/dUDwwMhkv5aAaaSAaj7N5GRAAAAAQAxAkoCdQXJABgALEAUDQEAEhcBEgYBBhkaCQ8fAhcXASAAPzMSOT8zERIBOTkRMxEzETMRMzEwASE1NzY2NTQmIyIGByc2MzIWFRQOAgchAnP9vux/R0s+PmQ1SIWchJUZNFPyAZACSm7me3FFQUIwKF5xg28uT1Fc5AAAAAABACMCOQKRBckAIQBLQCsCFhwJABYEDg4WEQkEIiMCEREbEgEKEgHIEgEPEh8SXxIDEhIHGR8fDAchAD8zPzMSOS9dXXFxMxI5ERIBFzkRMxEzETMRMzEwARQHFhUUBiMiJzUWMzI1NCMjNTMyNjU0JiMiByc2NjMyFgJ3mLK4qph
0joDN4XV1Z19NQmh7SkqQUYidBOeXOSylf446gUacjXFOQTtCTl43LnkAAQGDBNkDHwYhAAkAIkASCQUKC28DAQOAoAkBDwlfCQIJAC9dXRrMXRESATk5MTABNjY3MxUGBgcjAYMmdSjZLLo/dwTyMLFOFUDCMQAAAAABAK7+FAROBEoAFgA5QBwQABMTFAgFChQKFxgPCw0GFQ8JFRQbDQJdWQ0WAD8rABg/Pz8zEjk5ERIBOTkRMzMRMxEzMzEwARAzMjY1ETMRIycjBiMiJyMWFREjETMBYvapmbSSHApt3ZJaCAq0tAGF/vy70gI8+7aTp1xKqv7ABjYAAAABAHH+/ARkBhQADwAnQBIEBQABAQULAxARCAgOAQUDDgAAPzMvMxI5LxESARc5ETMRMzEwASMRIxEjEQYjIiY1EDYzIQRkdtF3PlTYy9roAjH+/Aam+VoDMxL6+wEE/gABAJMCSAGRA14ACwAVQAkABgwNCQN9WQkALysREgE5OTEwEzQ2MzIWFRQGIyImk0E8PkNEPTtCAtNBSktAQEtKAAAAAAEAHf4UAbIAAAARADFAGA8LBRANAAsNCxITDRBACw5IEBAOCAMbDgAvPzMSOS8rMxESATk5ETMRMzMRMzEwARQGIyInNRYzMjY1NCc3MwcWAbKXmEElJEhLTbtYdzWy/uNjbAtwCiczWRiwbSYAAAABAEwCSgHsBbYACgAgQA4CBgMKAwsMCQkDIAYAHgA/Mj85LxESATk5ETMzMTABMxEjETQ3BgYHJwFWlpIIHy6CRwW2/JQCNUNzHCZdZAAAAAIAQgMQAsMFxwALABcAJ0AUDAYAEgYSGBkPAAMQA0ADAwMVCR8APzPEXTIREgE5OREzETMxMAEUBiMiJjU0NjMyFgUUFjMyNjU0JiMiBgLDrZeRrKiZlav+AFhmZFpaZGRaBG2jurmkpbW2pHl3d3l5dHQAAgBQAHMDwQPFAAYADQAzQBsLCQcKBAIAAwMCCgkEDg8BCAUgDAEQDDAMAgwAL11xMy8zERIBFzkRMxEzETMRMzEwAQEnAQE3AQUBJwEBNwEDwf6jfwEf/uF/AV3+aP6mfwEf/uF/AVoCDv5lSwFfAV5K/mQb/mVLAV8BXkr+ZP//AEcAAAXjBbYAJwIXAokAAAAmAHv7AAEHAjsDKf23AAmzAwISEgA/NTUA//8ALgAABdMFtgAnAhcCTAAAACYAe+IAAQcAdANe/bcAB7ICDxIAPzUAAAD//wAgAAAGLQXJACcCFwLjAAAAJwI7A3P9twEGAHX9AAAJswIBBxIAPzU1AAACADP+dwNgBF4AGgAlAFdANwcSARkNGxkgEgUmJ18abxoCABoQGgILAxoaDyMjHX1ZIxAPCn5ZTw9fD58Prw8ETw+vD/8PAw8AL11xKwAYPysREgA5GC9fXl1dERIBFzkRMxEzMTABFRQGBwYGFRQWMzI3FwYjIiY1NDY2NzY2NTUTFCMiJjU0NjMyFgJaSmSFRn93n6s/xsnC3ChSeGc9wH8+P0k0NkkCpDV1llRvblRgbViRYrupSXFmZ1pvWCEBL4lHQklCQgAAAP//AAAAAAUbB3MCJgAkAAABBwBD/8oBUgAWuQAC/41ACRIOBQYlAg8FJgArNQErNf//AAAAAAUbB3MCJgAkAAABBwB2AIsBUgATQAsCThcTBQYlAhcFJgArNQErNQAAAP//AAAAAAUbB3MCJgAkAAABBwFLACcBUgAWuQAC//dACRsVBQYlAhsFJgArNQErNf//AAAAAAUbBzMCJgAkAAABBwFSAAwBUgAWuQAC//hACRcjBQYlAg4FJgArNQErNf//AAAAAAUbBykCJgAkAAABBwBqADkBUgAasQMCuP/8QAoOIAUGJQMCIwUmACs1NQErNTUAAwAAAAAFGwcGABIAGAAkAFZALQkTAAMZGB8NFQoAGQQZCg0EJSYVCQMKHA8QbxACCQMQIhgHaVkYGAkiAwUJEgA/Mz8SOS8rABgQxF9eXTIzMxI5ERIBFzkRMxEzETMzETMRMzIxMAEUBgcBIwMhAyMBJiY1NDYzMhYTAycGBwMBNCYjIgYVFBYzMjYDbTwzAh2/sP28rroCGzU8eGdmfgikRh4hpgFWQTIxQTo4M0AGMUVjGPqPAcX+OwVqGWRIYnV2+9gBu9t4Y/5FA8c2PT02Nj09AAAAAv/+AAAGkQW2AA8AEwBwQEAFCg4OEQEIAAAMARAEFBUKDWlZ2AoBOgoBCQoBDwAKoAoCEgMKCgEGEANpWRAQAQYFEgkTBhNpWQYDAQ5pWQESAD8rABg/KxEAMxg/ERI5LysREgA5GC9fXl1eXV1dKxESARc5ETMRMzMRMzIxMCEhESEDIwEhFSERIRUhESEBIREjBpH9B/4A3L4CtgPd/b8CGv3mAkH7TgG5dwHF/jsFtqL+OKD99gHGAqoAAAD//wB9/hQEzwXLAiYAJgAAAQcAegIEAAAAC7YBRR4YDxUlASs1AAAA//8AxwAAA/gHcwImACgAAAEHAEP/twFSABW0AQ0FJgG4/6m0ERUCCyUBKzUAKzUA//8AxwAAA/gHcwImACgAAAEHAHYAQgFSABNACwEVBSYBMxURAgslASs1ACs1AAAA//8AxwAAA/gHcwImACgAAAEHAUv/+wFSABW0ARkFJgG4//q0GRMCCyUBKzUAKzUA//8AxwAAA/gHKQImACgAAAEHAGoADgFSABdADQIBIQUmAgEADB4CCyUBKzU1ACs1NQAAAP//ADwAAAJiB3MCJgAsAAABBwBD/rkBUgAVtAENBSYBuP+wtBEVBgslASs1ACs1AP//AFIAAAKIB3MCJgAsAAABBwB2/2kBUgATQAsBFQUmAWAVEQYLJQErNQArNQAAAP////0AAAKxB3MCJgAsAAABBwFL/vkBUgAVtAEZBSYBuP/9tBkTBgslASs1ACs1AP//ADwAAAJ8BykCJgAsAAABBwBq/wsBUgAXQA0CASEFJgIBAgweBgslASs1NQArNTUAAAAAAgA9AAAFWgW2AAwAGAByQEcGBBIWFggEAA0NFAQDGRoVBgcGaVkSGAcBegcBSAcBDwdvB38HnwevBwUPB68HzwffB/8HBQsDBwcECQkRa1kJAwQWa1kEEgA/KwAYPysREgA5GC9fXl1xXV1xMysRADMREgEXOREzETMzETMRMzEwARAAISERIzUzESEgAAMQACEjESEVIREzIAVa/nf+i/55mJgBtAFVAXzC/ur+7eIBbf6TuQJSAun+mf5+AoOgApP+h/6mARgBHv4KoP4b//8AxwAABU4HMwImADEAAAEHAVIAmgFSABNACwEdBSYBCR0pCRMlASs1ACs1AAAA//8Aff/sBcMHcwImADIAAAEHAEMAdwFSABW0AhkFJgK4/6i0HSEGACUBKzUAKzUA//8Aff/sBcMHcwImADIAAAEHAHYBDgFSABNACwIhBSYCPyEdBgAlASs1ACs1AAAA//8Aff/sBcMHcwImADIAAAEHAUsAtAFSABW0AiUFJgK4//K0JR8GAC
UBKzUAKzUA//8Aff/sBcMHMwImADIAAAEHAVIAngFSABW0AhgFJgK4//i0IS0GACUBKzUAKzUA//8Aff/sBcMHKQImADIAAAEHAGoA0wFSABdADQMCLQUmAwIEGCoGACUBKzU1ACs1NQAAAAABAIMBDgQMBJgADgAqQBcNCgMAAgUFAAwKBA8QLwxfDH8MrwwEDAAZL10REgEXOREzETMRMzEwAQEXAQAXByYnASc2NwE3AkgBXmb+pAEgOmbnd/6qa4PX/qRrAzkBX2n+pP7cOGnnc/6maYHbAVprAAAAAAMAff/BBcMF+AATABsAIwBfQDQRABcFBwoeBRwKBQgWHxIPABQUDx8ICgUkJR8WHhcEGSEPEggFBAMNDSFpWQ0EAxlpWQMTAD8rABg/KxESABc5ERIXORESARc5ETMRMxEzETMRMxEzETMRMxEzMTABEAAhIicHJzcmERAAITIXNxcHFgMQJwEWMzISARAXASYjIgIFw/6d/sHok2J9arQBXwFHzKBffGjBw2j9cHKn6/T8P2ECjW2d7vYC3f6h/m5gi1GYxgFvAWUBiVyHVJLL/pYBCJb8Xk4BLAEm/v2SA5tI/tMAAP//ALj/7AUfB3MCJgA4AAABBwBDAEIBUgAVtAETBSYBuP+otBcbCAElASs1ACs1AP//ALj/7AUfB3MCJgA4AAABBwB2ANUBUgATQAsBGwUmATobFwgBJQErNQArNQAAAP//ALj/7AUfB3MCJgA4AAABBwFLAH8BUgAVtAEfBSYBuP/ytB8ZCAElASs1ACs1AP//ALj/7AUfBykCJgA4AAABBwBqAJgBUgAZtgIBJwUmAgG4//60EiQIASUBKzU1ACs1NQD//wAAAAAEhwdzAiYAPAAAAQcAdgA3AVIAE0ALARIFJgFEEg4HAiUBKzUAKzUAAAAAAgDHAAAEbwW2AAwAFQA8QB4JDQUFBgARBhEWFwkVa1kJCQYHBA1rWQQEBgcDBhIAPz8SOS8rERIAORgvKxESATk5ETMRMxEzMzEwARQEISMRIxEzFTMgBAEzMjY1NCYjIwRv/tP+46a4uMUBGQES/RCT3cG2xbYDDuLv/sMFtv7T/fSOnZCHAAEArv/sBLAGHwAvAFBAKxAcKCkFHAAiCxYWIhwpBDAxAgUIAwsAHxwZAxYiKRUtJV1ZLQEOFF5ZDhYAPysAGD8rABg/ERIXORESFzkREgEXOREzETMRMxEzETMxMAEUBwYGFRQWFxYWFRQGIyInNRYWMzI1NCYnJiY1NDY3NjY1NCYjIBURIxE0NjMyFgQtj09BQHWJZMW4u25An0TTUXB2aURISkGFf/7vtOTh0ugE8ItwPkkiKEJMXJ1koaxFoigusEdoR0t9Vz9pNTdcM05W3ftUBKy1vqAAAP//AF7/7APXBiECJgBEAAABBgBDkAAADrkAAv+KtComExklASs1AAD//wBe/+wD1wYhAiYARAAAAQYAdjEAAAu2AisvKhMZJQErNQD//wBe/+wD1wYhAiYARAAAAQYBS9wAAA65AAL/47QzLRMZJQErNQAA//8AXv/sA9cF4QImAEQAAAEGAVLGAAAOuQAC/+m0LzsTGSUBKzUAAP//AF7/7APXBdcCJgBEAAABBgBq5gAAELEDArj/4LQmOBMZJQErNTX//wBe/+wD1waJAiYARAAAAQYBUP0AABCxAwK4//S0LCYTGSUBKzU1AAMAXv/sBoEEXgAoADMAOgB5QEEKAB4XKQAYAy8XODgvAAM7PCMQJg0DMGBZAzc3GGRZGTcBAw83ARAGNzcmDTQHDQdeWRMNECEaYVkhJiYsXlkmFgA/KxEAMysAGD8zKxEAMxESORgvX15dX10rABgQxSsREgA5ORESARc5ETMRMzMRMxEzETMxMBMQJTc1NCYjIgcnNjYzMhYXNjYzMhIVFSESITI2NxUGBiMgJwYGIyImNxQWMzI2NTUHBgYBIgYHITQmXgH0uHF0i6g4R8tngKUrNqpwxen9QggBNViaXliYZv7dfVLGiKS4u2hWjKOZsaMDuHeICwH8fQExAU4QB0V6dlSHKDZTXVVd/vLdb/6BISueJyDnfGurmFxWo5ZjBwdqAiqhmZii//8Acf4UA5MEXgImAEYAAAEHAHoBTAAAAAu2ATEdFwMJJQErNQAAAP//AHH/7AQbBiECJgBIAAABBgBDtQAADrkAAv/AtCElAwolASs1AAD//wBx/+wEGwYhAiYASAAAAQYAdlAAAAu2AlslIQMKJQErNQD//wBx/+wEGwYhAiYASAAAAQYBS/kAAAu2AhEpIwMKJQErNQD//wBx/+wEGwXXAiYASAAAAQYAagoAAA23AwIVHC4DCiUBKzU1AAAA////1AAAAXAGIQImAPMAAAEHAEP+UQAAAA65AAH/mrQJDQIDJQErNf//AKcAAAJDBiECJgDzAAABBwB2/yQAAAALtgFtDQkCAyUBKzUAAAD///+vAAACYwYhAiYA8wAAAQcBS/6rAAAAC7YBARELAgMlASs1AAAA////6gAAAioF1wImAPMAAAEHAGr+uQAAAA23AgECBBYCAyUBKzU1AAACAHH/7ARoBh8AGwAnAGxAPBgMHBMQIiIGGQ4AHBwOEQYEKCkQEQ4WGRgGFw8ADxAPIA8DCQMLAw8PCRQJH15ZCQkDExcUAQMlXVkDFgA/KwAYPzMzEjkvKxESADkYLxE5X15dERIXORESARc5ETMRMxEzETMzETMzMTABEAAjIgA1NAAzMhc3JicFJzcmJzcWFzcXBxYSAzQmIyIGFRQWMzI2BGj+9vbh/uoBA97hXgk7w/71TeVWYkmcZuxOy5emtq+WpqConqmcAjP+5v7TAQ/i5QEHdwTWsZlwgzoze0lLiW51jP51/uiPpq2zpbPGAAD//wCuAAAETAXhAiYAUQAAAQYBUhAAAAu2AQ0eKgoUJQErNQD//wBx/+wEaAYhAiYAUgAAAQYAQ9AAAA65AAL/tbQdIQcAJQErNQAA//8Acf/sBGgGIQImAFIAAAEGAHZaAAALtgI+IR0HACUBKzUA//8Acf/sBGgGIQImAFIAAAEGAUsMAAAOuQAC//60JR8HACUBKzUAAP//AHH/7ARoBeECJgBSAAABBgFS9QAAC7YCAiEtBwAlASs1AP//AHH/7ARoBdcCJgBSAAABBgBqGQAAELEDArj//rQYKgcAJQErNTUAAwBmAPgEKwSqAAMADQAXAFFANw4EEggDCAQABBgZBqALAQALMAtAC3ALoAuwC/ALBwsLFT8QARAQAC8BXwF/Aa8BzwHvAf8BBwEAL10zMy9dMzIvXXEzERIBFzkRMxEzMTATNSEVATQzMhUUBiMiJhE0MzIVFAYjIiZmA8X9qnNwQi4wQ3NwQi4wQwKJkpL+7Ht7Qjs7Avx7e0I7OwAAAAMAcf+8BGgEhwATABoAIgBUQC8dDxYFFAoFCBIPABsbDxcIBwoGIyQXHRYeBCAZDxIIBQQDDQ0ZXVkNEAMgXVkDFgA/KwAYPysREgAXORESFzkREgEXOREzE
TMRMxEzETMRMzEwARAAIyInByc3JhEQADMyFzcXBxYFFBcBJiMgATQnARYzMjYEaP7w8JdxUnZcgwEM8pd1UHldgfzDMQHDSWz+wQKDL/49RWydpAIn/vP+0kNzUH+cAQABDAErSnNOgZv2pmACcjT+YJhk/Y0v1wD//wCi/+wERAYhAiYAWAAAAQYAQ8IAAA65AAH/oLQaHhQKJQErNQAA//8Aov/sBEQGIQImAFgAAAEGAHZ1AAALtgFTHhoUCiUBKzUA//8Aov/sBEQGIQImAFgAAAEGAUsUAAAOuQAB//+0IhwUCiUBKzUAAP//AKL/7AREBdcCJgBYAAABBgBqIQAAELECAbj//7QVJxQKJQErNTX//wAC/hQEFAYhAiYAXAAAAQYAdhcAAAu2AV0hHQAKJQErNQAAAgCu/hQEewYUABUAIQBBQCITGhUMBA8PEAYfEB8iIwsACQMRABAbAxZdWQMQCR1dWQkWAD8rABg/KwAYPz8REjk5ERIBOTkRMxEzERczMTABNjYzMhIREAIjIicjFhURIxEzERQHJSIGBxUUFjMgETQmAWRDp2rV7u7V3ngMDLS0BgE+oZUClKYBJY8DtltN/tX+9P7z/tKfhCj+NQgA/jZGThGzxiffxwGo0M4AAP//AAL+FAQUBdcCJgBcAAABBgBquQAAELECAbj//7QYKgAKJQErNTX//wAAAAAFGwa8AiYAJAAAAQcBTQBCAVIAH0AWAg8REAUGJQJ/EY8RnxGvEc8RBREFJgArXTUBKzUAAAD//wBe/+wD1wVqAiYARAAAAQYBTfkAAA65AAL//bQpKBMZJQErNQAA//8AAAAABRsHPgImACQAAAEHAU4ALwFSABNACwIAERkFBiUCDgUmACs1ASs1AAAA//8AXv/sA9cF7AImAEQAAAEGAU7oAAAOuQAC/+60KTETGSUBKzUAAP//AAD+PQUbBbwCJgAkAAABBwFRA5wAAAAOuQAC/+O0GhsAByUBKzX//wBe/j0EBgRcAiYARAAAAQcBUQKRAAAAC7YCADIzABolASs1AAAA//8Aff/sBM8HcwImACYAAAEHAHYBCAFSABNACwEhBSYBsyEdDxUlASs1ACs1AAAA//8Acf/sA5MGIQImAEYAAAEGAHZIAAALtgGXIBwDCSUBKzUA//8Aff/sBM8HcwImACYAAAEHAUsAvgFSABNACwElBSYBdiUfDxUlASs1ACs1AAAA//8Acf/sA6UGIQImAEYAAAEGAUvtAAALtgFJJB4DCSUBKzUA//8Aff/sBM8HNwImACYAAAEHAU8CEgFSABNACwEhBSYBdRgeDxUlASs1ACs1AAAA//8Acf/sA5MF5QImAEYAAAEHAU8BTgAAAAu2AVUXHQMJJQErNQAAAP//AH3/7ATPB3MCJgAmAAABBwFMAL4BUgATQAsBIgUmAXYkHw8VJQErNQArNQAAAP//AHH/7AOtBiECJgBGAAABBgFM9QAAC7YBUSMeAwklASs1AP//AMcAAAVaB3MCJgAnAAABBwFMAFgBUgAVtAIbBSYCuP+mtB0YBQAlASs1ACs1AP//AHH/7AWPBhQCJgBHAAABBwI4AxQAAAALtgJcISEODiUBKzUAAAD//wA9AAAFWgW2AgYAkgAAAAIAcf/sBNkGFAAbACgAfEBIFRcOJiYDExcXCQwQAxkZHwMDKSoIGgAGFg4PDl9ZEwgPGA8CEQ8PHw8CFAMPDwYRABgVBiNdWQAGEAYgBgMJAwYQABxdWQAWAD8rABg/X15dKwAYPz8SOS9fXl1eXTMrEQAzERI5ORESARc5ERczMxEzETMRMxEzMTAFIgIREBIzMhczJiY1NSE1ITUzFTMVIxEjJyMGJzI2NTU0JiMiBhUUFgIz1uzr1993DQMK/kwBtLScnJMbCHPCo5aYpYyVkBQBJgENAQ8BKqIObih9h7a2h/spk6eVscoj5r7W0MXXAP//AMcAAAP4BrwCJgAoAAABBwFNAAQBUgAdQBQBfw+PD58Prw8EDwUmAQAPDgILJQErNQArXTUA//8Acf/sBBsFagImAEgAAAEGAU0IAAALtgIdHx4DCiUBKzUA//8AxwAAA/gHPgImACgAAAEHAU4ADAFSABNACwEMBSYBCg8XAgslASs1ACs1AAAA//8Acf/sBBsF7AImAEgAAAEGAU79AAALtgIUHycDCiUBKzUA//8AxwAAA/gHGgImACgAAAEHAU8BZgE1ABNACwEVBSYBEAwSAgslASs1ACs1AAAA//8Acf/sBBsF5QImAEgAAAEHAU8BUgAAAAu2AhUcIgMKJQErNQAAAP//AMf+PQP4BbYCJgAoAAABBwFRAm0AAAALtgHaFhEBACUBKzUAAAD//wBx/loEGwReAiYASAAAAQcBUQJiAB0ADrkAAgEBtCYhAxIlASs1//8AxwAAA/gHcwImACgAAAEHAUwADAFSABNACwEWBSYBChgTAgslASs1ACs1AAAA//8Acf/sBBsGIQImAEgAAAEGAUz9AAALtgIVKCMDCiUBKzUA//8Aff/sBTsHcwImACoAAAEHAUsA+AFSABNACwEpBSYBeikjCAIlASs1ACs1AAAA//8Acf4UBD0GIQImAEoAAAEGAUsKAAALtgIRNjAUHSUBKzUA//8Aff/sBTsHPgImACoAAAEHAU4A+AFSABNACwEcBSYBeR8nCAIlASs1ACs1AAAA//8Acf4UBD0F7AImAEoAAAEGAU4OAAALtgIULDQUHSUBKzUA//8Aff/sBTsHNwImACoAAAEHAU8CWAFSABNACwElBSYBhRwiCAIlASs1ACs1AAAA//8Acf4UBD0F5QImAEoAAAEHAU8BWgAAAAu2AgwpLxQdJQErNQAAAP//AH3+OwU7BcsCJgAqAAABBwI5ASUAAAALtgE7JSEIAiUBKzUAAAD//wBx/hQEPQYhAiYASgAAAQcCOgCFAAAAC7YCLS4yFB0lASs1AAAA//8AxwAABSUHcwImACsAAAEHAUsAlgFSABW0ARkFJgG4//60GRMGCyUBKzUAKzUA//8ArgAABEwHqgImAEsAAAEHAUsAIQGJABNACwEkAiYBAiQeChYlASs1ACs1AAAAAAIAAAAABewFtgATABcAbUA8EgMXDw8AEAcLCwQUDAkMEAMYGQoWEhMSbFkHAwATEBNgEwMMAxMTEAEXDmlZMBcBkBcBFxcQBQEDDBASAD8zPzMSOS9dcSsREgA5GC9fXl0zMysRADMzERIBFzkRMzMzETMRMzMRMzMyMTATNTMVITUzFTMVIxEjESERIxEjNQE1IRXHuALuuMfHuP0SuMcEbf0SBMPz8/PzlPvRAqr9VgQvlP6J4+MAAAAAAQASAAAETAYUAB4Ac0BBEgALCRAUFgMICA0JHgAJAB8gFgkaEwsMC19ZEAgMGAwCEQ8MARQDDAwaDhoEXVm/GgEAGhAaIBoDGhoJDgAACRUAPzM/EjkvXV0rERIAORgvX15dXl0zKxEAMxESORESATk5ETMRMzMRFzMRMxEzMTAhETQm
IyIGFREjESM1MzUzFSEVIRUUBzM2NjMyFhURA5p3f6mZtJyctAGy/k4KDDW3bMfJApaFg7nV/fAE1Ye4uIeyWEBVVcHS/V7////kAAAC1AczAiYALAAAAQcBUv7iAVIAE0ALAQwFJgECFSEGCyUBKzUAKzUAAAD///+SAAACggXhAiYA8wAAAQcBUv6QAAAAC7YBAg0ZAgMlASs1AAAA//8ALAAAAowGvAImACwAAAEHAU3/AQFSAB1AFAF/D48Pnw+vDwQPBSYBAg8OBgslASs1ACtdNQD////cAAACPAVqAiYA8wAAAQcBTf6xAAAAC7YBBAcGAgMlASs1AAAA//8AHgAAApsHPgImACwAAAEHAU7+/wFSABNACwEMBSYBAg8XBgslASs1ACs1AAAA////ygAAAkcF7AImAPMAAAEHAU7+qwAAAAu2AQAHDwIDJQErNQAAAP//AFL+PQJiBbYCJgAsAAABBwFRAJEAAAALtgEDFhIGCyUBKzUAAAD//wAz/j0BhQXlAiYATAAAAQYBURAAAAu2AgocHQEAJQErNQD//wBSAAACYgc3AiYALAAAAQcBTwBQAVIAE0ALARUFJgEADBIGCyUBKzUAKzUAAAAAAQCuAAABYgRKAAMAFkAJAAEBBAUCDwEVAD8/ERIBOREzMTAhIxEzAWK0tARKAAD//wBS/nsEKwW2ACYALAAAAQcALQK2AAAAC7YBJRQUChslASs1AAAA//8AoP4UA38F5QAmAEwAAAEHAE0CDAAAABCxAwK4//u0GBgAKyUBKzU1AAD///9g/nsCdQdzAiYALQAAAQcBS/69AVIAE0ALAQMbFQkKJQEbBSYAKzUBKzUAAAD///+P/hQCXQYhAiYCNwAAAQcBS/6lAAAAFrkAAf/7QAkbFQkKJQEbESYAKzUBKzX//wDH/jsE9AW2AiYALgAAAQcCOQCJAAAADrkAAf+etBYSBgAlASs1//8Arv47BDMGFAImAE4AAAEGAjkxAAAOuQAB/7O0GRUMBiUBKzUAAAABAK4AAAQzBEoADgA1QBsNCwcHCAIFAwEFCAQPEAUCDQMGBggACQ8ECBUAPzM/MxI5ERczERIBFzkRMxEzETMzMTABMwEBIwEHESMRMxEUBzcDOdn+YQHA1/6Yh7+/DVQESv4a/ZwB8G/+fwRK/uOLiWYA//8AxwAAA/4HcwImAC8AAAEHAHb/bgFSABNACwEJDw4BAiUBDwUmACs1ASs1AAAA//8ApQAAAkEHrAImAE8AAAEHAHb/IgGLABNACwFrDQkCAyUBDQImACs1ASs1AAAA//8Ax/47A/4FtgImAC8AAAEGAjkzAAAOuQAB/8O0DwsBBSUBKzUAAP//AFz+OwFiBhQCJgBPAAABBwI5/u0AAAALtgEABwgBACUBKzUAAAD//wDHAAAD/gW3AiYALwAAAQcCOAEl/6MAEkAKAQkDAeUJCgEEJQErNQA/Nf//AK4AAAK2BhQCJgBPAAABBgI4OwAAC7YBogcHAwMlASs1AP//AMcAAAP+BbYCJgAvAAABBwFPAgj9aQALtgFTBgwCBCUBKzUAAAD//wCuAAACvwYUACYATwAAAQcBTwFM/TgAC7YBigQEAAAlASs1AAAAAAEAGwAAA/4FtgANAEhAKAMABwsLBAANCQADDg8DAQQKBwkGDwgfCAIJAwhAAgIABQMAC2lZABIAPysAGD8SOS8azV9eXRc5ERIBFzkRMzMRMxEzMTAzEQcnNxEzESUXBREhFcdlR6y4ARlJ/p4CfwH6OXpnAxT9WKaBzf4+pAAAAf/uAAACIwYUAAsAM0AaAg0IAAQECQUFDA0GCAkDAAIGAQcHBQoABRUAPz8SOS/NFzkREgE5ETMzETMyETMxMAE3FwcRIxEHJzcRMwFUg0zPtGlJsrQDZlt5jP1EAkhCeXMDIgAAAP//AMcAAAVOB3MCJgAxAAABBwB2AQgBUgATQAsBHQUmAU4dGQkTJQErNQArNQAAAP//AK4AAARMBiECJgBRAAABBgB2fQAAC7YBUR4aChQlASs1AP//AMf+OwVOBbYCJgAxAAABBwI5ANEAAAAOuQAB/7m0HRkJEyUBKzX//wCu/jsETAReAiYAUQAAAQYCOVgAAA65AAH/zbQeGgoUJQErNQAA//8AxwAABU4HcwImADEAAAEHAUwArAFSABNACwEeBSYBACAbCRMlASs1ACs1AAAA//8ArgAABEwGIQImAFEAAAEGAUwjAAALtgEEIRwKFCUBKzUA//8AAwAABN0FtgAnAFEAkQAAAQYCB+oAAA65AAH/drQWFgoKJQErNQABAMf+ewVOBbYAGQA7QB4KDQ0OFAgXEhIIAg4EGhsJEg4VDwMIDhIABWlZACIAPysAGD8zPzMSOTkREgEXOREzETMRMxEzMTABIic1FjMyNjcBIxYVESMRMwEzJjURMxEUBgPNYjpHVWZtAvzGCBGq1QMMCA6sx/57G5sUdW4Evv+m/OcFtvttmv8C+vpWxM0AAAAAAQCu/hQETgReAB0APEAfEw8PEBsHBwIQAx4fExAXEQ8QFRcLXVkXEAAFXVkAGwA/KwAYPysAGD8/ERI5ERIBFzkRMxEzETMxMAEiJzUWMzI1ETQmIyIGFREjETMXMzY2MzIWFREUBgMnVjs8Pop3faqatJEdCjS0bsrIj/4UGZEUrANrhYG70f3FBEqWUli/0vyPmq4AAP//AH3/7AXDBrwCJgAyAAABBwFNAMUBUgAdQBQCfxuPG58brxsEGwUmAgAbGgYAJQErNQArXTUA//8Acf/sBGgFagImAFIAAAEGAU0QAAAOuQAC//+0GxoHACUBKzUAAP//AH3/7AXDBz4CJgAyAAABBwFOAMEBUgAVtAIYBSYCuP//tBsjBgAlASs1ACs1AP//AHH/7ARoBewCJgBSAAABBgFODAAADrkAAv/9tBsjBwAlASs1AAD//wB9/+wFwwdzAiYAMgAAAQcBUwEZAVIAF0ANAwIrBSYDAkchJwYAJQErNTUAKzU1AAAA//8Acf/sBGgGIQImAFIAAAEGAVNcAAANtwMCPiEnBwAlASs1NQAAAAACAH3/7AbyBc0AFAAfAG5AQBgGDxMTHQ0AABEdBgQgIQ8SaVnYDwE6DwEJDwEPAA+gDwISAw8PAQsBE2lZARILDmlZCwMJFWlZCQQDG2lZAxIAPysAGD8rABg/KwAYPysREgA5GC9fXl1eXV1dKxESARc5ETMRMxEzETMxMCEhBiMgABEQACEyFyEVIREhFSERIQEiAhEQEjMyNxEmBvL89WZg/rv+oQFZAUFqWgMX/bQCJf3bAkz8NfL4+PB1VlUUAYoBaQFnAYcXov44oP32BIn+0v7g/t/+zyMEXCEAAAMAb//sBycEXAAeACoAMQBtQDscFQ4CAhYfCBYlFS8vJQgDMjMOAgULLhZeWRkuAQMPLgEQBi4uBQsrKAsoXVkRCxAAGGFZBSJdWQAFFgA/MysrABg/MysRADMREjkYL19eXV9dKxESADk5ERIBFzkRMxEzETM
RMxEzETMxMAUgJwYGIyIAERAAMzIWFzY2MzISFRUhEiEyNjcVBgYBFBYzMjY1NCYjIgYlIgYHITQmBZj+3oA/0Yji/vMBCO2CzD48wH7N8P0nCAFEWppoXZr7LJKjoJOVoqGQBD17jwwCFoEU43FyATQBBwEKAStyb210/vfkbf6DHy2eKB8CO9DW0c3W0tPVn5mXof//AMcAAATbB3MCJgA1AAABBwB2AHsBUgAVtAIfBSYCuP/7tB8bDBMlASs1ACs1AP//AK4AAAMvBiECJgBVAAABBgB24gAAC7YBRBsXDAIlASs1AP//AMf+OwTbBbYCJgA1AAABBwI5AIEAAAAOuQAC/6K0HxsMEyUBKzX//wBi/jsDLwReAiYAVQAAAQcCOf7zAAAAC7YBBxUWCwolASs1AAAA//8AxwAABNsHcwImADUAAAEHAUwAHQFSABW0AiAFJgK4/6q0Ih0MEyUBKzUAKzUA//8AfgAAAzIGIQImAFUAAAEHAUz/egAAAA65AAH/6rQeGQwCJQErNf//AGj/7AQEB3MCJgA2AAABBwB2AE4BUgATQAsBcS8rExglAS8FJgArNQErNQAAAP//AGj/7AN5BiECJgBWAAABBgB27wAAC7YBWy0pEhclASs1AP//AGj/7AQEB3MCJgA2AAABBwFL/+oBUgATQAsBGjMtExglATMFJgArNQErNQAAAP//AGj/7AN5BiECJgBWAAABBgFLmQAAC7YBEjErEhclASs1AP//AGj+FAQEBcsCJgA2AAABBwB6ASsAAAAOuQAB/9y0LCYGACUBKzX//wBo/hQDeQReAiYAVgAAAQcAegDdAAAADrkAAf/UtCokEgAlASs1//8AaP/sBAQHcwImADYAAAEHAUz/5gFSABNACwEWMi0TGCUBMAUmACs1ASs1AAAA//8AaP/sA3kGIQImAFYAAAEGAUylAAALtgEeMCsSFyUBKzUA//8AFP47BFwFtgImADcAAAEGAjkZAAAOuQAB//y0CwwBACUBKzUAAP//ACH+OwK2BUYCJgBXAAABBgI5swAAC7YBFhobCQQlASs1AP//ABQAAARcB3MCJgA3AAABBwFM/94BUgATQAsBEgUmAQQUDwQGJQErNQArNQAAAP//ACH/7ALuBhQCJgBXAAABBgI4cwAAC7YBhyAgEBAlASs1AAABABQAAARcBbYADwBHQCYHCwsADAUJDA4CBRARCg4PDmtZBw8PAQsDDw8DDBIGAgMCaVkDAwA/KxEAMxg/EjkvX15dMysRADMREgEXOREzMxEzMTABESE1IRUhESEVIREjESE1Adv+OQRI/jcBMP7QuP7PAzEB4aSk/h+X/WYCmpcAAAABACH/7AK2BUYAHQBkQDUKDggTFxsbDAgVAwMZCAMeHxoKCwpfWRcAC2ALAg0DCwsGExEREBMNFhMWZFkTDwYAXVkGFgA/KwAYPysRADMRMzMYLxESOS9fXl0zKxEAMxESARc5ETMRMzMRMzMRMzMxMCUyNxUGBiMgETUjNTM1IzU3NzMVIRUhFSEVIRUUFgIhVUAaazn+xIuLm51IawE9/sMBK/7VVX8XigwUAV7ziPlWSOr8jPmI6Wpr//8AuP/sBR8HMwImADgAAAEHAVIAcwFSABNACwESBSYBARsnCAElASs1ACs1AAAA//8Aov/sBEQF4QImAFgAAAEGAVL9AAALtgEEHioUCiUBKzUA//8AuP/sBR8GvAImADgAAAEHAU0AkQFSAB1AFAF/FY8VnxWvFQQVBSYBABUUCAElASs1ACtdNQD//wCi/+wERAVqAiYAWAAAAQYBTRsAAAu2AQMYFxQKJQErNQD//wC4/+wFHwc+AiYAOAAAAQcBTgCLAVIAFbQBEgUmAbj//bQVHQgBJQErNQArNQD//wCi/+wERAXsAiYAWAAAAQYBThQAAA65AAH//7QYIBQKJQErNQAA//8AuP/sBR8H2wImADgAAAEHAVAAngFSACCyAgEVuP/AQA4JC0gVBSYCAQAYEggBJQErNTUAKys1NQAA//8Aov/sBEQGiQImAFgAAAEGAVAnAAANtwIBAhsVFAolASs1NQAAAP//ALj/7AUfB3MCJgA4AAABBwFTAOUBUgAXQA0CASUFJgIBSBshCAElASs1NQArNTUAAAD//wCi/+wERAYhAiYAWAAAAQYBU3EAAA23AgFMHiQUCiUBKzU1AAAA//8AuP49BR8FtgImADgAAAEHAVECIwAAAAu2AQQcGAgBJQErNQAAAP//AKL+PQRpBEoCJgBYAAABBwFRAvQAAAAOuQAB//y0ISILCiUBKzX//wAZAAAHVgdzAiYAOgAAAQcBSwFWAVIAFbQBJgUmAbj//bQmIAkYJQErNQArNQD//wAXAAAGMwYhAiYAWgAAAQcBSwDHAAAAC7YBAC0nCR4lASs1AAAA//8AAAAABIcHcwImADwAAAEHAUv/5gFSABNACwEWBSYBABYQBwIlASs1ACs1AAAA//8AAv4UBBQGIQImAFwAAAEGAUuzAAALtgEGJR8ACiUBKzUA//8AAAAABIcHKQImADwAAAEHAGr/9QFSABdADQIBHgUmAgECCRsHAiUBKzU1ACs1NQAAAP//AE4AAAREB3MCJgA9AAABBwB2AEIBUgATQAsBSRMPBQYlARMFJgArNQErNQAAAP//AFAAAANzBiECJgBdAAABBgB27wAAC7YBVRMPBQYlASs1AP//AE4AAAREBzcCJgA9AAABBwFPAT0BUgAWuQAB//xACQoQBQYlARMFJgArNQErNf//AFAAAANzBeUCJgBdAAABBwFPANsAAAAOuQAB//m0ChAFBiUBKzX//wBOAAAERAdzAiYAPQAAAQcBTP/vAVIAE0ALAQMWEQUGJQEUBSYAKzUBKzUAAAD//wBQAAADcwYhAiYAXQAAAQYBTIgAAA65AAH/+7QWEQUGJQErNQAAAAEArgAAAuUGHwAMACFADwoOBAUFDQ4FFQgAXVkIAQA/KwAYPxESATkRMxEzMTABIgYVESMRECEyFwcmAhdeV7QBa2RoL1oFiXV2+2IEngGBJ44fAAEAvv4UBBQFywAfAERAJBkdHQwIEhsICgIFICEJHBkcZFkMGRkQABAVXVkQBAAFXVkAGwA/KwAYPysREgA5GC8zKxEAMxESARc5ETMzETMxMAEiJzUWMzI2NREjNTc1NDYzMhcHJiMiBhUVIRUhERQGAUpJQ0Y7XEzX16K5XXUtZjleTgEU/vCk/hQTlRJgcwPCVD6FwbQrjCFkeY2M/D67rgAABAAAAAAFHweqABEAGAAiAC4AeUBHBDAJEgAYKQwDFQAjHiMVGQoMBi8wIUANFkghIRwmAA9gDwIJAw8PLBwYB2lZGBgKAxUDLEAsUCwCLAnvHAEcQA0SSBwFCRIAPzMvK10SOV0RFzM5LysREgA5GC9fXl0zEjkvKxESARc5ETMRMxEzMxEzMhEzMTABFAYHASMDIQMjASY1NDYzMhYTAyYnBgcDEzY2NzMVBgYHIxM0JiMiBhUUFj
MyNgNxNi0CEb+o/aSgvAIQZHhnZ38SrBsvHyiqjzlfFtkesT9500AzMUE7NzNABZhBXhr7IQGJ/ncE3TaDYnd4/DYBqD2Sa2j+XASHQ4wnECqkKv70Nzs7NzY9OwAAAAUAXv/sA9cHqgAaACUAMQA9AEcAjkBXJjI4LBMjIwgLHhoBAR4ILDI+QwdJSO9C/0ICQkAJDkhCQD5QPgI+NTsfLwEfLy8vzy8DLwApECkgKQMJAykWAgAWCx9gWQsLFgAVFg9eWRYQBRteWQUWAD8rABg/KwAYPxI5LysREgA5GBDWX15d1F1xMzLWXcQrXRESARc5ETMRMxEzETMRMxEzMTAhJyMGBiMiJjUQJTc1NCYjIgYHJzY2MzIWFRElMjY1NQcGBhUUFgEUBiMiJjU0NjMyFgc0JiMiBhUUFjMyNgM1NjY3IRUGBgcDVCMIUqN8orgCD7psd1ebRDdTxGDHwv4Kl62iva1pAal9Zmd5eGhlfnFBMTJBOzgzP+MuahYBDBWkgJxnSaqbAU4QB0F9dzQghywysMD9FH2jlmMHB2pyVlwFN2V2dmNhdnZhNj09NjY9PQFdECp4HwwYaUQAAAD////+AAAGkQdzAiYAiAAAAQcAdgJUAVIAFbQCHQUmArgBXbQdGQUPJQErNQArNQD//wBe/+wGgQYhAiYAqAAAAQcAdgGLAAAAC7YDbERAABclASs1AAAA//8Aff/BBcMHcwImAJoAAAEHAHYBGwFSABNACwMtBSYDTC0pCgAlASs1ACs1AAAA//8Acf+8BGgGIQImALoAAAEGAHZYAAALtgM8LCgKACUBKzUA//8AaP47BAQFywImADYAAAEGAjkXAAAOuQAB/9O0LysGACUBKzUAAP//AGj+OwN5BF4CJgBWAAABBgI53AAADrkAAf/etC0pEgAlASs1AAAAAQEEBNkDuAYhAA0AKkAXDQcODwoNDwNvAwIDAwigDQEPDV8NAg0AL11dMzMvXRI5ERIBOTkxMAE2NjczFhcVIyYnBgcjAQR9Zxi4NMx/WoWDWHsE8IaAK2XMFzWDgDgAAQEEBNkDuAYhAAwAKkAXDAcNDgMKBQ8AbwACAACgCgEPCl8KAgoAL11dMy9dMhE5ERIBOTkxMAEzFhc2NzMVBgcjJicBBHtyaX5hf80zuDzABiFKc34/G81gZscAAAAAAQErBNkDiwVqAAMAIEATAwIEBQMPAC8AXwB/AJ8AzwAGAAAvXTIREgE5OTEwASEVIQErAmD9oAVqkQAAAQEfBNkDnAXsAA0ALEAaAwsODwoPAx8DLwOfAwQDAwegAAEPAF8AAgAAL11dMjIvXTMREgE5OTEwASImJzMWFjMyNjczBgYCWI2jCW4IVHNlYghxDawE2YqJRzs/Q4OQAAAAAAEAoAUAAXMF5QALACRAFQAGDA0DzwnvCQIACSAJAjAJgAkCCQAvXXFdMxESATk5MTATNDYzMhYVFAYjIiagPS0qPz8qLT0Fczw2Njw7ODgAAgFtBNkDLwaJAAsAFwAwQBkSBgAMBgwYGQ8JHwkBCcAVoAMBDwNfAwIDAC9dXTMazHEvMhESATk5ETMRMzEwARQGIyImNTQ2MzIWBzQmIyIGFRQWMzI2Ay99Zmd4eGdlfnFBMTJBOzgzPwW0ZXZ1ZGJ1dmE2PT02Nj09AAEAI/49AXUAAAAPACBADg0AAAoKBhARAyAIAQgMAC8vXTMREgE5OREzETMxMBcUFjMyNxUGIyI1NDczBga2MSssN0U606B/RkbuLi4NcxPBi3dCbQAAAAABAQIE2QPyBeEAFwAwQBsJFRgZFAWvDAEMgAkRYAABoADAAAIPAM8AAgAAL11dcjIyGs1dMjIREgE5OTEwASIuAiMiBgcjNjYzMh4CMzI2NzMGBgMSK1JPSSIxMg5oDHRhLVVOSCAwMQ9nDHQE2yUrJTs8eowlKyU7PHePAAAAAAIA3wTZA74GIQAJABMAK0AZDwUTCQQUFQ0PA28DAgMDE6AJAQ8JXwkCCQAvXV0zMy9dMxESARc5MTATNjY3MxUGBgcjJTY2NzMVBgYHI98jaCfFIa1CZwFpL2oZxCGtQmYE8i6xUBU4xDcZQbY4FTjENwAAAQH4BNkDFAZxAAkAHEAOCQUKCwOgCQEPCV8JAgkAL11dxBESATk5MTABNjY3MxUGBgcjAfgdNQrAD2k4bAT2S+dJFz/qWAAAAwEQBQwDjQa0AAgAEwAeAD1AIg4JGRQEFAgJBB8gAoAICBEWCwsczxEBABEgEQIwEYARAhEAL11xXTMzETMSOS8azBESARc5ETMRMzEwATY3MxUGBgcjJzQzMhYVFAYjIiYlNDMyFhUUBiMiJgH+OCTFHXE9Vu5fJjg4Jik2AcFeJTkyLCo0BYWPoBQ7rUsGZC81NTIyNWQvNS06MgAA//8AAAAABRsGCAImACQAAAEHAVT+HP+XABSzAhEAArj+9bQSEgUFJQErNQA/NQAA//8AkwJIAZEDXgIGAHkAAP///9AAAAR1BggAJgAofQABBwFU/dj/lwAUswEPAAG4/6i0ERECAiUBKzUAPzUAAP///9AAAAW4BggAJwArAJMAAAEHAVT92P+XABSzAQ8AAbj/krQREQYGJQErNQA/Nf///94AAANQBggAJwAsAO4AAAEHAVT95v+XABSzAQ8AAbj/urQREQYGJQErNQA/Nf///+L/7AYHBggAJgAyRAABBwFU/er/lwASQAoCGwACPRwcBgYlASs1AD81////zgAABZMGCAAnADwBDAAAAQcBVP3W/5cAFLMBDAABuP/etA4OBwclASs1AD81////4gAABjgGCAAmAXZCAAEHAVT96v+XABJACgEjAAE5JSUNDSUBKzUAPzX////m/+wCoAa0AiYBhgAAAQcBVf7WAAAAEEAJAwIBIxkpDwAlASs1NTUAAP//AAAAAAUbBbwCBgAkAAD//wDHAAAExQW2AgYAJQAAAAEAxwAABAAFtgAFAB9ADgMEBAEGBwQSBQJpWQUDAD8rABg/ERIBOTkRMzEwARUhESMRBAD9f7gFtqT67gW2AP//ACkAAAR9BbYCBgIoAAD//wDHAAAD+AW2AgYAKAAA//8ATgAABEQFtgIGAD0AAP//AMcAAAUlBbYCBgArAAAAAwB7/+wFwwXNAAMADwAbAFpANxAKBBYWAgMKBBwdAANpWRgAAUoAAXoAAUkAAW8AfwACDwCvAAILAwAABw0NGWlZDQQHE2lZBxMAPysAGD8rERIAORgvX15dcV1dcXErERIBFzkRMxEzMTABIRUhJRAAISAAERAAISAAARASMzISERACIyICAekCa/2VA9r+m/7B/rv+oQFfAUcBPQFl+3r27O/y9Ovt9wM3n0X+of5uAYoBaQFlAYn+c/6d/tz+0gEtASUBJQEn/tj//wBSAAACYgW2AgYALAAA//8AxwAABPQFtgIGAC4AAAABAAAAAATbBbYACgAaQAsJAQsMBQkKAwIJEgA/Mz8SORESATkyMTABASMBJicGBwEjAQLNAg7C/rxJI
hRS/r/DAgwFtvpKA5rPhWPv/GQFtgAAAP//AMcAAAZ7BbYCBgAwAAD//wDHAAAFTgW2AgYAMQAAAAMASAAABCcFtgADAAcACwBMQC4JBgIDBwoGDA0AA2lZ2AABOgABCQABDwAAoAACEgMAAAoEBAdpWQQDCgtpWQoSAD8rABg/KxESADkYL19eXV5dXV0rERIBFzkxMBMhFSEDIRUhARUhNcMC6f0XUgON/HMDtvwhA0ygAwqi+46iogAA//8Aff/sBcMFzQIGADIAAAABAMcAAAUQBbYABwAlQBEEBQABBQEICQEFEgYDaVkGAwA/KwAYPzMREgE5OREzETMxMCEjESERIxEhBRC2/SW4BEkFFPrsBbYAAAD//wDHAAAEbwW2AgYAMwAAAAEASgAABF4FtgANAEFAIgMACAoJAg0GAgoABQ4PCQIABAMHBAdpWQQDAQsAC2lZABIAPysRADMYPysRADMREjk5ERIBFzkRMxEzETMxMDM1AQE1IRUhJwEBJSEVSgHf/i0Dzf1mYAHN/h4BTgH8mAJkAiGZpAL96v2iAqIAAAD//wAUAAAEXAW2AgYANwAA//8AAAAABIcFtgIGADwAAAADAGj/7AYEBcsAGQAiACsAZUA3JxQCGg0NGSsOBx4eDhQDLC0iJBgka1kCABgQGAIQAxgYDgAaKgwQECprWVAQYBACEBAOAAQOEwA/PxE5L10rEQAzETMREjkYL19eXTMrEQAzERIBFzkRMxEzMzMRMzMRMzEwATMVMzIWFhUUAgQjIxUjNSMiJAI1NDY2MzMTMzI2NTQmKwMiBhUUFjMzAtu2RK79hJT++rIntiuy/vySiP6sQbYZxdvLtji2N7XM2sgWBcu0i/iepf7+guHhhQECopv5jfxN1720z9GyvdcAAP//AAgAAASoBbYCBgA7AAAAAQBtAAAGAgW2ABsAREAjCgcQAAANARYTEwEHAxwdEAwbAwMMa1lvAwEDAwEUDggDARIAPz8zMxI5L10rEQAzETMREgEXOREzETMzETMRMzEwISMRIyIkJjURMxEQITMRMxEzIBERMxEUBgQjIwOPtC26/v+GuAGcGrQdAZq8jv79sy8BvIPwpAHj/iH+gwNc/KQBeQHj/h+m93wAAQBOAAAF9gXNAB8ARUAkFh0KAxgTCAMNEx0dGQcNBCAhEABpWRAEGgYIFgkICWlZGQgSAD8zKxEAMxI5ORg/KxESARc5ETMRMzMRMxEzETMxMAEiBhUUEhcVITUhJgI1EAAhIAARFAIHIRUhNTYSNTQCAyHq8aWx/bIBbJegAWQBOgE+AWKhlQFr/bKxp/MFK//34P6+gJOidAFYzQE0AV7+pP7Mzv6mc6KTfwFH3PYBAAD//wA8AAACfAcpAiYALAAAAQcAav8LAVIAF0ANAgEhBSYCAQIMHgYLJQErNTUAKzU1AAAA//8AAAAABIcHKQImADwAAAEHAGr/8wFSABdADQIBHgUmAgEACRsHAiUBKzU1ACs1NQAAAP//AHH/7ATNBnECJgF+AAABBgFUHwAAC7YCNTYxDxklASs1AP//AFj/7AOYBnECJgGCAAABBgFU0gAAC7YBYC8rEB0lASs1AP//AK7+FARMBnECJgGEAAABBgFUPwAAC7YBSB4aChQlASs1AP//AKj/7AKgBnECJgGGAAABBwFU/swAAAAOuQAB//m0GRgPACUBKzX//wCi/+wEeQa0AiYBkgAAAQYBVTsAABBACQMCAR0eLgQPJQErNTU1AAIAcf/sBM0EXgALACwAQ0AiGSMdCQ8dKSkEDwMtLioVDBIYDxIHXVkSECAADABdWSYMFgA/MysRADMYPysAGD8REjk5ERIBFzkRMxEzETMzMTAlMjY1NTQmIyARFBYXIgIREBIzMhYXMzY3MwYCFREUFjMyNxUGBiMiJicjBgYCUqWSlaT+24171O7033mgNg0YKY4YHDEjIB4QQSJXWhEPPKWBvtgM4cP+WM7QlQEqAQsBEgErVFRcOEX+/Wb+Vj80CoMJEVZRV1AAAAIArv4UBLAGHwAUACgAVEAsBQYGJhgQEBEDJgkdHSYhEQQpKgUhIiIhXVkiIgwAERsAFV1ZAAEMG11ZDBYAPysAGD8rABg/ERI5LysREgA5ERIBFzkRMxEzETMRMxEzETMxMAEyFhUQBRUWFhUUBCMiJicRIxE0NhcgEREWFjMgETQmIyM1MzI2NTQmApjd+v7Iu77+++9voUq0/uf+z0ebaAFQuKxtWJWemAYf0Lf+2jMIFce70OQhJP3jBjTg95b+tvyUJS8BLZifmI6GeYEAAAAAAQAK/hQEGwRKABMAIUAQEAQBBQQUFQoEBAEPBQ8BGwA/PzMSOREzERIBFzkxMAEjNBI3ATMTFhYXMzY2NxMzAQYCAiHDPC3+Q7vnJUEJCAZBHdm7/motN/4UWwEiewQ+/cBdxzAo100CSPvRdf7YAAIAb//sBGYGFAAeACoARUAjCRYAECUcEAMWHx8DHAMrLBMWECIAHwMGDV1ZBgAZKF1ZGRYAPysAGD8rERIAOTkREjkREgEXOREzETMRMxEzETMxMAEmJjU0NjMyFhcHJiYjIgYVFBYXFhYVFAAjIiQ1NBIBNCYnBgYVFBYzMjYCG4tzx6lovoBOZaRXUmBtpdWs/vLy5f7u4AJdd4u9wqqRnqgDpk+fYoSaLkCNODBMQUVrW3X0nez+9fjSswEB/nd8skkt1qGKqbUAAAEAWP/sA5gEXgAlAGVAORQTEyMEECMXHQsLARcQBCYnFAIlJQJdWUUlARklAQgl6CUCEA8lARQDJSUNGhohXVkaEA0HXVkNFgA/KwAYPysREgA5GC9fXl1eXV1dKxESADkREgEXOREzETMRMxEzETMxMAEVIyAVFBYzMjY3FQYjIiY1NDY3NSYmNTQ2MzIWFwcmJiMiFRQhAteV/sqUj1WrZIvj3PFxg2Nq579vrVdEY4RK+AE5AoWTvVldJy+eS6uUY4MmCxyAXYecJSmPLBycqAAAAQBx/nEDqAYUACQANkAZEgMFGgAXHgwABQwFJSYiCQMaFxgXXVkYAAA/KxEAMxgvLjMREgE5OREzETMzETMRMzMxMAUUByM2NTQmJicmJjU0PgM3DgIHIzUhFQYAAhUUFhYXFhYDqIGyfzFuWcrBLlR5ncoHOVSb9gL61/7hhzt7ppiJUpGsqWUoLSYQI9jGZ7amn6S2AQIDApWHtP69/uSjYHZHIh9xAAAAAAEArv4UBEwEXgAUADNAGQwICAkUAAkAFRYMCRAKDwkVABsQBF1ZEBAAPysAGD8/PxESORESATk5ETMRMxEzMTABETQmIyIGFREjETMXMzY2MzIWFREDmnaAqZm0kRsKM7hvysT+FASqhIW/z/3HBEqWUVnEz/tJAAMAcf/sBFIGHwALABIAGQBnQEAWEBAGABcPBg8aGxYQXVnnFgHWFgGFFpUWtRYDSRZZFgJpFgFYFgEPFo8WnxYDCwMWFgMJCRNdWQkBAwxdWQMWAD8rABg/KxESADkYL19eXV1d
cV1dXSsREgE5OREzMxEzETMxMAEQAiMiAhEQEjMyEgEyEhMhEhITIgIDIQICBFL5+/T59vf3/f4MoJcH/YsEl5yWlwoCcw2aAwT+bv56AZMBhQGWAYX+bPv0ASgBKP7P/uEFDP71/uQBIAEHAAAAAQCo/+wCoARKAA8AH0AOAQ4OCBARDw8LBF1ZCxYAPysAGD8REgE5OREzMTABERQWMzI2NxUGBiMiJjURAVpKVCxiGhtwNqSTBEr8+mNiDwiKDBSqrAMI//8ArgAABDMESgIGAPoAAAAB//T/7ARWBiEAIgAxQBgAFggBIyQBHx8LABULBl1ZCwEYE11ZGBYAPysAGD8rABg/EjkRMxESATk5MzIxMCMBJy4CIyIHNTYzMhYWFwEWFjMyNxUGIyImJwMmJyMGBwMMAdk3IjFDMT41RENefFs4AWIULyQYJTJDSlogllURCCFQ+gQ1mllCIQyRETyBm/wlOTYKhRhKWwGk81N+vv3BAAAA//8Arv4UBE4ESgIGAHcAAAABAAAAAAQMBEoAEAAaQAsADBESBxALAA8QFQA/PzIRORESATkyMTARMxMeAxczNhIRMxACByO63A0jIhwICKyass/hwgRK/bAlYWNbHrABtQFN/pT+BOIAAQBv/nEDqAYUADAAZEAzKCsJFA8ZJQQDAxQMBh0AFAYlKysGAAMxMgQZGBgZX1kPGAERBhgYDSIuKA8MDQxdWQ0AAD8rABgQxC8uMxI5L19eXSsREgA5ERIBFzkRMxEzETMRMxEzETMRMzMRMxEzMTATNDY3NSY1NDY3BiMjNSEVIyIGBhUUFjMzFSMiBhUUHgIXFhYVFAcjNjY1NCYnJiZvm4PZjKOQZz4CxTaC3H2irKqwrtEyXIRSlYR7qjdCd4PIywGmidAqDD7Zc50vDJWLTo5dcGmJqpBOYTsjESFuWYqzSpQyNjsYIskAAAD//wBx/+wEaAReAgYAUgAAAAEAGf/sBQQESgAUADdAHQoLEwcRAwcLDQUVFgsVEgkNDw1dWQ8PBQBdWQUWAD8rABg/KxEAMzMYPxESARc5ETMRMzEwJTI3FQYjIhERIREjESM1NyEVIxEUBIs1JTFW4/4vst+TBFjXfxSNGgEGAsL8TAO0TkiW/Up/AAACAKL+FARmBF4AEAAcADNAGQYVCQkKABoKGh0eChsOEV1ZDhADF11ZAxYAPysAGD8rABg/ERIBOTkRMxEzETMzMTABEAIjIicjFhURIxEQEjMyACUiBhURFjMyNjU0JgRm/ueweQoKtv3s2wEA/h2ZknSzoY6NAiX+8P7XXETN/t0EHwELASD+0JnKzP60ZNTQ0tAAAQBx/nEDqgReAB8ALkAVGgcKDQAUBw0UDSAhBBEKFx1hWRcQAD8rABgvLjMREgE5OREzETMRMxEzMTABFBYWFxYWFRQHIzY2NTQmJicmJjUQADMyFhcHJiMiBgErOYqfkYp7qjRHL3Fa0MMBEf9Snjk5jGyqpAIIgIBOIh9vXJCtRZc0Ji0oECj81gEfATkiGZY01QAAAAACAHH/7AS+BEoADQAZADVAGgwUCwAOBwAUBxQaGwwXCRddWQkPBBFdWQQWAD8rABg/KxEAMxESATk5ETMRMxEzETMxMAEUBgYjIgA1ECEhFSEWARQWMzI2NRAnIyIGBGZ755zt/vYCVAH5/vay/MWlnZumqj/awAH4nO+BASH/Aj6Wp/7/wcrAswEFu8sAAAABABT/6QOeBEoAEgAsQBcDDgEIDhAEExQCEBIQXVkSDwsFXlkLFgA/KwAYPysRADMREgEXOREzMTABFSERFDMyNxUGBiMiJjURITU3A57+VMRoRidxMLeq/tmWBEqY/Z7VFocPEqupAnVQSAABAKL/7AR5BEoAFAApQBMOCwYDEgsDCxUWDgQPAAhdWQAWAD8rABg/MxESATk5ETMRMxEzMTAFIiYRETMRECEyNjU0JiczFhYVEAACderptAEto50bJbQnG/77FPsBCwJY/a7+h+r1gNGbleN8/sL+1AAAAAIAcf4UBVwEXgAYACIARkAkBwoKBCAYGAwAExkZAAQDIyQGEAAbEBxdWRAQIAwBDF1ZFwEWAD8zKxEAMxg/KwAYPz8REgEXOREzETMzETMRMxEzMTABESQAERA3FwYGFRAFETQ2MzISFRQCBgcRATQmIyIGFRE2NgKD/vv+89GLWU8BXqqaudyI+qcBc3hlR0+vxP4UAdoOASEBDgEq/2B133v+fyMCYrbF/tr5sf78kwn+JgQpuNRycv2cEOgAAAH/6f4UBFwEUAAgADdAHRcIGA8HGB4EISIVBRcAABxeWQAGDxcbEQxdWREbAD8rABg/P8QrERIAOTkREgEXOREzMjEwEzIWFhcTATMBExYWMzI3FQYjIiYnAwEjAQMmJiMiBzU2uDpQPy2LATrA/lK/KVFCLDBBPnOOPJL+nMEB06geRTQoHDUEUC1ZdP6gAlT8/v4ca1EIixF2oQF9/WwDSAG0UlwMjREAAAABAKL+FAWaBhIAGQBBQCEHBBYTAQ4OGA8KBAQPEwMaGxkABxQPDxsBGBAYXVkNEBYAPzMrEQAzGD8/Mz8REgEXOREzETMzETMRMxEzMTABETY2NTQmJzMSFRAABREjESQAEREzERAFEQNov8IdJLJA/uL+7LD+9v70sgFkBhL6cxLeyIPlpf7s8v7r/tAR/iYB2gkBIAESAiH92f55GQWPAAAAAAEAc//sBc8ESgAnAENAIRwZBwoKAxMQIBkZEAMDKCkmBhERABwGDxYNAA1dWSMAFgA/MisRADMYPzMSOS8RORESARc5ETMRMxEzETMRMzEwBSICETQSNzMGAhUUFjMyNjURMxEUFjMyNjU0AiczFhIVEAIjIicjBgH6t9A7PrhCO3VqX2asZV1oejtCuEI30LfeRQpBFAEoAQChAQmMlf78n73UjnwBNv7KgIrKx50BCJOa/v6a/v/+2bi4AAAA////6v/sAqAF1wImAYYAAAEHAGr+uQAAAA23AgEJECIPACUBKzU1AP//AKL/7AR5BdcCJgGSAAABBgBqNwAAELECAbj/+7QVJwQSJQErNTX//wBx/+wEaAZxAiYAUgAAAQYBVCMAAAu2AjwhHQcAJQErNQD//wCi/+wEeQZxAiYBkgAAAQYBVCkAAAu2ASEeGgQSJQErNQD//wBz/+wFzwZxAiYBlgAAAQcBVADTAAAAC7YBODEtAyAlASs1AAAA//8AxwAAA/gHKQImACgAAAEHAGoAIQFSABdADQIBIQUmAgESDB4CCyUBKzU1ACs1NQAAAAABABT/7AVUBbYAHQBEQCUWDg4PGwgIFAIPEQUeHxYNaVkWFhIPEhUREhFpWRIDAAVpWQATAD8rABg/KxEAMxg/EjkvKxESARc5ETMRMxEzMTAFIic1FjMyNjU1NCYjIREjESE1IRUhESEyFhUVFAYD32c0O1hjZH+H/oe3/qwDx/5EAYfQ3sUUGKAVdm6DfG39IQUUoqL+bcGyj77VAAD//wDHAAAEAAdzAiY
BYQAAAQcAdgBeAVIAE0ALAQ8FJgFLDwsFASUBKzUAKzUAAAAAAQB9/+wE6QXNABgASkAqAwYRFgwFEQQZGgMGaVnZAwE6AwEDDwMBDwYDAw4UFABpWRQEDglpWQ4TAD8rABg/KxESADkYL19eXV9dXSsREgEXOREzMzEwASIEByEVIRIAMzI3FQYjIAAREAAhMhcHJgNC3P74GgLI/TMMAQXypMqe6f6z/qEBeAFR67hLrwUp9eqg/vX+7jqgOwGEAW0BXQGTWp5UAAD//wBo/+wEBAXLAgYANgAA//8AUgAAAmIFtgIGACwAAP//ADwAAAJ8BykCJgAsAAABBwBq/wsBUgAXQA0CASEFJgIBAgweBgslASs1NQArNTUAAAD///9g/nsBdQW2AgYALQAAAAIAAv/pBy8FtgAaACMAX0A3FxsbBAAfHwQNAyQlFyNpWdgXAToXAQkXAQ8AF6AXAhIDFxcEFQQba1kEEhUGaVkVAwsQa1kLEgA/KwAYPysAGD8rERIAORgvX15dXl1dXSsREgEXOREzETMRMzEwARQEISERIQICBgYjIic1FjMyNjYSEyERMyAEATMyNjU0JiMjBy/+7P75/rH+mzhVU4xtRUA0PTpROEdIArh5ARgBIf1Ogb61uthiAazP3QUU/lf98P11GZoZbPIBxQIQ/ZbP/iGFiYZ6AAIAxwAAB1wFtgASABsAYkA3CwcHCA8TEwwEABcXBAgDHB0bBgsGaVkP2AsBOgsBCQsBDwALoAsCEgMLCwQNCQMIEgQTa1kEEgA/KwAYPz8zEjkvX15dXl1dXTMrEQAzERIBFzkRMxEzMxEzETMRMzEwARQEISERIREjETMRIREzETMgBAEzMjY1NCYjIwdc/uv+/f6u/Y24uAJzuncBGAEh/VCBvrO61mIBrNHbAqr9VgW2/ZYCav2Wz/4hhYmGegAAAAABABQAAAVUBbYAEwA+QCASBgAMDA0FBgYNDwMUFQALaVkAABAGDRITDxAPaVkQAwA/KxEAMxg/MxI5LysREgEXOREzETMRMxEzMTABITIWFREjETQmIyERIxEhNSEVIQIfAY3N27h3hf5/uf6uBAD+CwN/vLf99AH2e2z9IwUSpKQAAP//AMcAAATyB3MCJgG0AAABBwB2AKoBUgATQAsBFAUmAR4UEAUAJQErNQArNQAAAP//ABf/7AT+B2ICJgG9AAABBwI2AEYBUgATQAsBFwUmAQgaIgkSJQErNQArNQAAAAABAMf+fwUQBbYACwAwQBgIBQIDAAkJAwUDDA0KBgMDIgUIaVkBBRIAPzMrABg/PzMREgEXOREzETMRMzEwISERIxEhETMRIREzBRD+NL3+QLgC27b+fwGBBbb67gUSAP//AAAAAAUbBbwCBgAkAAAAAgDHAAAEgwW2AAwAFQBXQDEHAAkNDQQAEQQRFhcJFWlZ2AkBOgkBCQkBDwAJoAkCEgMJCQQFBQhpWQUDBA1rWQQSAD8rABg/KxESADkYL19eXV5dXV0rERIBOTkRMxEzETMRMzEwARQEISERIRUhETMgBAEzMjY1NCYjIwSD/vz+9P5UA2T9VNkBGAET/PzhvKmw08MBrNjUBbai/jjG/hh/j4x0AAD//wDHAAAExQW2AgYAJQAA//8AxwAABAAFtgIGAWEAAAACAAz+fwVaBbYADQATAD9AIBMEBAUMDgABAQ4FAxQVAQUiChBqWQoDDBMGAwZpWQMSAD8rEQAzMxg/KwAYPzMREgEXOREzETMRMxEzMTABIxEhESMRMzYSEyERMyERIQYCBwVasPwSsHGU2BMCpLr+j/61E81//n8Bgf5/AiX8As4BSPruBGz5/Wjb//8AxwAAA/gFtgIGACgAAAABAAIAAAbRBbYAEQA9QCEBEQYNDQMOCAoKCQ4AEQUSEwACDwYMCQYRBwQBAw4LERIAPzMzPzMzEhc5ERIBFzkRMxEzMxEzETMxMAEBMwERMxEBMwEBIwERIxEBIwJO/cnMAi+xAi/M/ckCStP9xbH9w9MC8ALG/TwCxP08AsT9PP0OAuX9GwLl/RsAAAEATv/sBEYFywAnAF1AMwMEBBwjDAAcBxMTHBcMBCgpAxcYGBdrWToYAQMPGN8YAg8GGBgKJSUfa1klBAoQa1kKEwA/KwAYPysREgA5GC9fXl1fXSsREgA5ERIBFzkRMxEzETMRMxEzMTABFAYHFRYWFRQEISAnNRYWMzI2NTQmIyM1MzI2NTQmIyIGByc2ITIEBCe2pLq//sr+6P75o2PjYsbJ4uDRxtnXn4dyt21Y0wEd4QECBGCOtRkIGbSRzeVPqC8xkomDh5qRe2p7Nkd9mMQAAQDJAAAFYAW2ABEAKEASBREJDgoRChITDgURBwADChESAD8zPzIROTkREgE5OREzMxEzMTATMxEUAgczATMRIxE0EjcjASPJrAoFCQMkzawOAwn82s0FtvzdVf7oKgS6+koDG2gBFSn7PwD//wDJAAAFYAdiAiYBsgAAAQcCNgDjAVIAE0ALARIFJgEbFR0RCSUBKzUAKzUAAAAAAQDHAAAE8gW2AAoAMEAXCQAADAcDAwQECgsMBwIKAwQIBQMBBBIAPzM/MxIXORESATk5ETMRMxEzETMxMCEjAREjETMRATMBBPLe/Wu4uAKF0f2FAuX9GwW2/TwCxP06AAAAAAEAAv/pBOMFtgASAClAFAABCgETFAESEQNpWREDCA1rWQgTAD8rABg/KwAYPxESATk5ETMxMCEjESEHAgIGIyInNRYzMjYSEyEE47j+MR8/XpeCSjs0PU9dbTcDIAUU7v4U/lanGZoZxwK+Aa4AAP//AMcAAAZ7BbYCBgAwAAD//wDHAAAFJQW2AgYAKwAA//8Aff/sBcMFzQIGADIAAP//AMcAAAUQBbYCBgFuAAD//wDHAAAEbwW2AgYAMwAA//8Aff/sBM8FywIGACYAAP//ABQAAARcBbYCBgA3AAAAAQAX/+wE/gW2ABYAKUAUEggCCQMXGAgNDQARCQMABWlZABMAPysAGD8zEjkRMxESARc5MzEwBSInNRYzMjY3ATMBFhczNzcBMwEOAgElcVZbZmuDPf3NygGiGRIICB8BXsP+LVOIrxQerilkhAQ//NMvORhSAyv76rqqUAAAAP//AGj/7AYEBcsCBgFzAAD//wAIAAAEqAW2AgYAOwAAAAEAx/5/BcMFtgALADJAGQgFAAkCAwMJBQMMDQoGAwMiAAgFCGlZBRIAPysRADMYPz8zERIBFzkRMxEzETMxMCUzESMRIREzESERMwUQs7H7tbgC27ai/d0BgQW2+u4FEgAAAAABAKQAAATPBbYAEwArQBULCAARAQgBFBUFDmlZBQUBEgkDARIAPz8zEjkvKxESATk5ETMzETMxMCEjEQYGIyImNREzERQWMzI2NxEzBM+4lcho0N64fIxfsaO4Alg1J8GyAkf903Z1HjYCxAABAMcAAAeDBbYACwAxQBgEAQgFAAkJBQEDDA0KBgIDCAQBBGlZARIAPysRADMYPzMzERIBFz
kRMxEzETMxMCEhETMRIREzESERMweD+US4Aki4Akq6Bbb67gUS+u4FEgABAMf+fwgXBbYADwA7QB4DAAcECwgNDg4IBAAEEBEJBQEDDiILBwMAA2lZABIAPysRADMzGD8/MzMREgEXOREzETMRMxEzMTAzETMRIREzESERMxEzESMRx7gCObsCO7ixsQW2+u4FEvruBRL67P3dAYEAAAIAEAAABSEFtgAMABUAVUAxCQ0NBAAREQQGAxYXCRVpWdgJAToJAQkJAQ8ACaAJAhIDCQkEBwcGaVkHAwQNa1kEEgA/KwAYPysREgA5GC9fXl1eXV1dKxESARc5ETMRMxEzMTABFAQjIREhNSERMyAEATMyNjU0JiMjBSH+9/3+Sf6sAgvnAQoBFfz67bKor8fRAazQ3AUUov2W0f4jhYmGegAAAwDHAAAGFwW2AAoAEwAXAFVAMQcLCwQADxQVFQ8EAxgZBxNpWdgHAToHAQkHAQ8AB6AHAhIDBwcEFgUDFRIEC2tZBBIAPysAGD8/MxI5L19eXV5dXV0rERIBFzkRMxEzETMRMzEwARQEIyERMxEzIAQBMzI2NTQmIyMBIxEzBH3++f/+ULjhAQkBFP0C57KmrMbNBJi5uQGsz90Ftv2W0v4ih4mGeP1WBbYAAAIAxwAABLoFtgAKABIASkAqBwsLBAAOBA4TFAcSaVnYBwE6BwEJBwEPAAegBwISAwcHBAUDBAtrWQQSAD8rABg/EjkvX15dXl1dXSsREgE5OREzETMRMzEwARQEIyERMxEhIAQBISARNCYjIwS6/uz+/h+4ARIBDQEc/MUBFwFmt8r8AazO3gW2/ZbV/icBDoV7AAABAD3/7ASRBcsAGwBZQDUDDwkZFhYXDwMcHRgXaVnZGAE6GAEIGAEPMBhAGAIAGKAYAhwDGBgMBQUAaVkFBAwTaVkMEwA/KwAYPysREgA5GC9fXl1xXl1dXSsREgEXOREzMxEzMTABIgcnNjMyBBIVEAAhIiYnNRYWMyAAEyE1ISYkAdunq0yu8tkBOaL+k/6rca9lVq9jAQgBDQj9OQLFFP75BSlOmlaw/rrh/or+bhgjoBcjARgBB6Le/QAAAAACAMf/7AfsBc0AEgAeAF9ANwwICAkTDQYAGRkGCQMfIAwHaVnYDAE6DAEJDAEPAAygDAISAwwMCQoDCRIQHGlZEAQDFmlZAxMAPysAGD8rABg/PxI5L19eXV5dXV0rERIBFzkRMxEzMxEzETMxMAEQACEgAAMhESMRMxEhEgAhIAABEBIzMhIREAIjIgIH7P6s/s7+1P6rDP6muLgBXhcBUAEeATIBWPuu5eHi6eXi4+cC3f6e/nEBbgFQ/VYFtv2WATUBTP5y/p7+3v7QASwBJgElASn+0wACAC0AAARWBbYADQAVAEdAJQMSAhIGCxUMBgwWFwMAFQBrWQAVEBUCIQMVFQkMAhIJD2tZCQMAPysAGD8zEjkvX15dKxEAMxESATk5ETMzETMzETMxMAEBIwEmJjU0JCEhESMRESMiBhUQITMCf/6F1wGam5IBEQERAZq43beyAXHVAl79ogJ/Ms6extP6SgJeAruAhf7m//8AXv/sA9cEXAIGAEQAAAACAHX/7ARcBh8AGAAiAExAKAYTIQATGwAbIyQMFhAQHV1ZDxA/EAILAxAQFgUFBmFZBQEWGV1ZFhYAPysAGD8rERIAORgvX15dKxESADkREgE5OREzETMRMzEwExASNzYlFwcGBwYGBzM2NjMyEhUQACMiAAUgERAhIgYGBxB13e3fARMfd+iNkJEKDTrBbcrk/vbs6v75AgABKf7zSYlvIwKTAW8BjTQtL54TJh0g5dpRYf786v77/uEBZtEBfQFqPGI7/fIAAAMArgAABFgESgAOABYAHwBqQDwDBAQXHBQUCwAXBw8PFwsDICEDExwcE11ZRRwBGRwBCBzYHOgcAxAPHAEUAxwcCwwMG11ZDA8LFF5ZCxUAPysAGD8rERIAORgvX15dXl1dXSsREgA5ERIBFzkRMxEzETMRMxEzETMxMAEUBgcVFhYVFAYjIREhIAM0JiMhESEgAzQmIyERITI2BDV4b4t/5df+EgHsAZuRkIv+2QErARcfd3j+zAETk30DNWpvFAkTf2ucpgRK/QJcSv6fApRNQv7TSQAAAAEArgAAA0wESgAFAB9ADgIDAwAGBwMVBAFdWQQPAD8rABg/ERIBOTkRMzEwASERIxEhA0z+FrQCngOy/E4ESgACACf+gwR9BEoADQATAD9AIBMEBAUMDgABAQ4FAxQVAQUiChBeWQoPDBMGAwZdWQMVAD8rEQAzMxg/KwAYPzMREgEXOREzETMRMxEzMTABIxEhESMRMzYSEyERMyERIwYCBwR9rv0ErFiFlAQCQaD+svQPjWb+gwF9/oMCFbkB5wES/E4DJN3+Q4oA//8Acf/sBBsEXgIGAEgAAAABAAIAAAX6BEoAEgA3QB8CCQkSCgYEBQoOEA0HExQOEgsCBQgGDQMAEA8KBw0VAD8zMz8zMxIXORESARc5ETMzETMxMAEzEQEzAQEjAREjEQEjAQAnMwECqqgBvsP+OwHszf4lqP4lzQHs/sqPxQG8BEr96wIV/ev9ywIt/dMCLf3TAjUBcKX96wAAAAEARP/sA48EXgAjAGVAOQ8QEAIIGA0CEx4eAiIYBCQlDyIjIyJdWUUjARkjAQgj6CMCEA8jARQDIyMWCgoEXVkKEBYbXVkWFgA/KwAYPysREgA5GC9fXl1eXV1dKxESADkREgEXOREzETMRMxEzETMxMAEgNTQjIgYHJzYzMhYVFAcVFhYVFAYjIic1FjMyNjU0JiMjNQGHATf5T4hfP6vUwdrOfXb62/KEt72NmJqflAKFqJweKI9Mmoe7OAgkiGeXrEeiVl5cXluTAAEArgAABHUESgANADBAFwEDAwwGCAoHDAcODwMKDA0PBxUEDwwVAD8/Pz8SOTkREgE5OREzMzMRMxEzMTABEQcHATMRIxE3NwEjEQFYCAQCTN2oAwX9uN8ESv1iwjgDmPu2AoeLhPxqBEoA//8ArgAABHUGEAImAdIAAAEGAjZGAAALtgEBERkNBiUBKzUAAAEArgAABCMESgAKACtAFgoGBgcDAQIHBAsMCgUCAwcACA8EBxUAPzM/MxIXORESARc5ETMRMzEwATMBASMBESMRMxEDN8X+KwH80f4QtLQESv3v/ccCLf3TBEr96wAAAAEADv/yA/IESgAQAClAFAABCQEREgEVDwNdWQ8PBwxkWQcWAD8rABg/KwAYPxESATk5ETMxMCEjESECAgYjIic1FjMyEhMhA/K3/r0cX5l3QR4VI26DJQKWA7T+mP5lvw6HCAHQAfsAAQCuAAAFSARKABUAKkAUEBEFBhEGFhcPBwADCwMSDwsGERUAPzMzPzMSFzkREgE5OREzETMxMCU2NwEzESMRBgcBIwEmJicRIxEzARYC9h4uAR7ooiMv/u6S/u4UJxOi4QEVJaxtdAK9+7YDiW14/VwCqDByP/x3BEr9X
l4AAAABAK4AAARqBEoACwBVQDIBCQkKBQIGCgYMDQEIXVkEAQH0AQEGtQEBA48BAU0BXQECfQEBBb8BAQEBCgMLDwYKFQA/Mz8zEjkvXV9dcV1fXV9dcSsREgE5OREzMxEzETMxMAERIREzESMRIREjEQFiAlS0tP2stARK/jcByfu2Aen+FwRKAAD//wBx/+wEaAReAgYAUgAAAAEArgAABE4ESgAHACVAEQABBAUBBQgJBQEVAgdhWQIPAD8rABg/MxESATk5ETMRMzEwISMRIREjESEBYrQDoLT9yARK+7YDsAAAAP//AK7+FAR7BF4CBgBTAAD//wBx/+wDkwReAgYARgAAAAEAKQAAA6QESgAHACVAEgIDAAMFAwgJAxUBBQYFXVkGDwA/KxEAMxg/ERIBFzkRMzEwASERIxEhNSEDpP6Zsv6eA3sDsvxOA7KYAP//AAL+FAQUBEoCBgBcAAAAAwBv/hQFXAYUABEAFwAcAExAJxIJDxsEBAwUBQAYGAUJAx0eDQAFGxoVDBVdWQ8MEBsUBhRdWQMGFgA/MysRADMYPzMrEQAzGD8/ERIBFzkRMxEzMzMRMzMRMzEwARQABREjESYANTQAJREzERYABRAFEQYGBRAlESQFXP7h/wCw/P7eASABBKr9ASL7zwFovKwDd/6bAWUCJfX+1BT+JAHcFQEs9PoBJxQBuv5GGf7U8P6DJQNCE9O6AXcn/MAnAAAA//8AJQAABBcESgIGAFsAAAABAK7+gwTpBEoACwAyQBkGAwoHAAEBBwMDDA0BIggEDwoGAwZdWQMVAD8rEQAzGD8zPxESARc5ETMRMxEzMTABIxEhETMRIREzETME6bD8dbQCOLSb/oMBfQRK/E4DsvxMAAAAAQCYAAAEOQRKABIAK0AVAREJBgoRChMUDgNdWQ4OCgcSDwoVAD8/MxI5LysREgE5OREzMxEzMTABERQzMjY3ETMRIxEGBiMiJjURAUzTXKVltLRusWykvgRK/nC8Nz4B1/u2AelHOKyYAZwAAQCuAAAGfwRKAAsAMUAYCAUACQQBAQkFAwwNCgIGDwAIBQhdWQUVAD8rEQAzGD8zMxESARc5ETMRMxEzMTAlIREzESERMxEhETMD8AHbtPovtAHZtZgDsvu2BEr8TgOyAAAAAAEArv6FBx0ESgAPADtAHgwJAA0EAQYHBwENCQQQEQciDgIKDwQADAkMXVkJFQA/KxEAMzMYPzMzPxESARc5ETMRMxEzETMxMCUhETMRMxEjESERMxEhETMD8AHbsqC1+ka0Adm1mAOy/Ez97wF7BEr8TgOyAAAAAAIAJQAABSMESgAKABIAaUBAABAQBgMLCwYIAxMUAA9dWYQAlAACBkUAAQMfAAENAN0A7QADEAVgAHAAAg8AARQDAAAGCQkIXVkJDwYQXlkGFQA/KwAYPysREgA5GC9fXl1xX15dXV9dX10rERIBFzkRMxEzETMxMAEhIBEQISERITUhATQmIyERISACNwEvAb3+Qf4f/qICEgI6eZj+1wEvAQsCh/7J/rADspj8/FpQ/qEAAAMArgAABYsESgAKABIAFgBpQEAAEBAIBAsTFBQLCAMXGAAPXVmEAJQAAgZFAAEDHwABDQDdAO0AAxAFYABwAAIPAAEUAwAACBUJDxQVCBBeWQgVAD8rABg/PzMSOS9fXl1xX15dXV9dX10rERIBFzkRMxEzETMRMzEwASEyFhUUBiMhETMBNCYjIREhIAUjETMBYgEd1szY0v43tAINd5D++gEEAQkCHLS0Aoecm6epBEr8/FlT/p+RBEoAAgCuAAAEVARKAAkAEgBeQDkADw8HAwoHChMUAA5dWYQAlAACBkUAAQMfAAENAN0A7QADEAVgAHAAAg8AARQDAAAHCA8HD15ZBxUAPysAGD8SOS9fXl1xX15dXV9dX10rERIBOTkRMxEzETMxMAEhIBEUBiMhETMBNCYjIREhMjYBYgFGAaze1v4OtAI+fJH+zwE2f4kCh/7JpKwESvz8WFT+n1wAAAAAAQA7/+wDgwReABgAXUA5EAIWCgcHCAIDGRoJCF1ZlQkBaQkBOAkBWAkBbwl/CQIPCR8JnwkDCwMJCQATEw1hWRMQAAVhWQAWAD8rABg/KxESADkYL19eXXFdcV1dKxESARc5ETMzETMxMAUiJzUWMyATITUhJiYjIgcnNjYzIAAREAABXql6mo0BUBf94wIbDqKcaZczQKVMAQEBCf7hFDucPgFnk6icNpIdIv7d/ur+8f7WAAAAAAIArv/sBj8EXgASAB4AbUBBDAgICRMNBgAZGQYJAx8gDAddWYQMlAwCBkUMAQMfDAENDN0M7QwDEAUPDAEUAwwMCQoPCRUQHF1ZEBADFl1ZAxYAPysAGD8rABg/PxI5L19eXV9eXV1fXV9dKxESARc5ETMRMzMRMxEzMTABEAAjIgInIREjETMRITY2MzIAARQWMzI2NTQmIyIGBj/+/eLW/Q7+6bS0ARkW/NHeAQP88o6dnYyOm52OAif+8v7TAQvy/hcESv435fj+zv770NbW0M3T0wAAAAIAIQAAA8sESgANABUAPUAeAg4BDgUKEQsFCxYXAg0QDV1ZEBAICwEVCBNdWQgPAD8rABg/MxI5LysRADMREgE5OREzMxEzMxEzMTAzIwEmJjU0NjMhESMRIQEUISERISIG8tEBOX6CzrcB7LT+9f78AQwBA/7bc3cBzSCid5is+7YBtAFQugFqWgD//wBx/+wEGwXXAiYASAAAAQYAagYAAA23AwIRHC4DCiUBKzU1AAAAAAEAEv4UBE4GFAAnAIFAShkHEhAXGx0DDw8UECUHBwIQAygpHRAhGhITEl9ZFwgTGBMCEQ8TARQDExMhFSELXVm/IQEAIRAhICEDCQMhIRAVABAVAAVdWQAbAD8rABg/PxI5L19eXV0rERIAORgvX15dXl0zKxEAMxESORESARc5ETMRMzMRFzMRMxEzMTABIic1FjMyNRE0JiMiBhURIxEjNTM1MxUhFSEVFAczNjYzMhYVERQGAzFQOTc6gXd9qZm2nJy0AYr+dggKMbRzyMqQ/hQZlBWqA0KFgbvT/fAE14W4uIW0PVtOXL/S/LqgqgAAAP//AK4AAANMBiECJgHNAAABBgB29wAAC7YBSw8LBAUlASs1AAABAHH/7AOwBF4AGgBfQDoPEhIDCRkZEQMDGxwPEl1ZlQ8BaQ8BOA8BWA8Bbw9/DwIPDx8Pnw8DCwMPDwAGBgxhWQYQABVhWQAWAD8rABg/KxESADkYL19eXXFdcV1dKxESARc5ETMRMxEzMTAFIgAREAAzMhYXByYjIgYHIRUhFhYzMjY3FQYCe/r+8AET/VSgOzWJdZ6jEQIb/eMJpKFdjj54FAEhARIBFwEoIRqUNKCkk7ivJRmcOwAA//8AaP/sA3kEXgIGAFYAAP//AKAAAAFzBeUCBgBMAAD////sAAACLAXXAiYA8wAAAQcAav67AAAADbcCAQQEFgIDJQErNTUA////j/4UAXMF5QIGAE0A
AAACAA7/8gZQBEoAFQAeAHNARgAbGwYDFhYGDgMfIAAaXVmEAJQAAgZFAAEDHwABDQDdAO0AAxAFYABwAAIPAAEUAwAABhQGG15ZBhUUCF1ZFA8MEGRZDBUAPysAGD8rABg/KxESADkYL19eXXFfXl1dX11fXSsREgEXOREzETMRMzEwATMgERAhIREhAgIGIyInNRYzMhITIQE0JiMjETMyNgOs/gGm/kb+YP8AG2CWdkMeHRlriCUCUAHwfZbd44SJAof+yf6wA7L+m/5jvg6FCAHJAgT8/FlR/qFcAAAAAAIArgAABqgESgARABkAcUBBDwsLDAETExAIBRYWCAwDGhsSCg8KXVkBhA+UDwIGRQ8BAx8PAQ0P3Q/tDwMQBQ8PARQDDw8IEQ0PDBUIE15ZCBUAPysAGD8/MxI5L19eXV9eXV1fXV9dxCsAGBDFERIBFzkRMxEzMxEzETMRMzEwAREzMhYVECEhESERIxEzESERExEzIDU0JiMECPbczv5K/lz+GLi4Aeyy5QENfZQESv47m5r+sAHp/hcESv43Acn9pv6htVpQAAAAAAEAEgAABEwGFAAeAHNAQRIACwkQFBYDCAgNCR4ACQAfIBYJGhMLDAtfWRAIDBgMAhEPDAEUAwwMGg4aBF1ZvxoBABoQGiAaAxoaCQ4AAAkVAD8zPxI5L11dKxESADkYL19eXV5dMysRADMREjkREgE5OREzETMzERczETMRMzEwIRE0JiMiBhURIxEjNTM1MxUhFSEVFAczNjYzMhYVEQOad3+pmbScnLQBsv5OCgw1t2zHyQKWhYO51f3wBNWHuLiHslhAVVXB0v1e//8ArgAABCMGIQImAdQAAAEGAHY/AAALtgEnFBAIAyUBKzUA//8AAv4UBBQGEAImAFwAAAEGAja/AAALtgEBGyMACiUBKzUAAAEArv6FBFIESgALADBAGAQBCgsIBQULAQMMDQsiBgIPAQRdWQkBFQA/MysAGD8zPxESARc5ETMRMxEzMTAhIREzESERMxEhESMCLf6BtAI8tP6LsARK/E4Dsvu2/oUAAAEAxwAABBIG4wAHACdAEgUGAwAGAAgJAQcGEgcEaVkHAwA/KwAYPxDGERIBOTkRMxEzMTABETMRIREjEQNmrP1tuAW2AS3+L/ruBbYAAAAAAQCuAAADUAWJAAcAJ0ASAgMABQMFCAkGBAMVBAFkWQQPAD8rABg/EMYREgE5OREzETMxMAEhESMRIREzA1D+ErQB8rADvvxCBEoBPwAAAP//ABkAAAdWB3MCJgA6AAABBwBDARIBUgAVtAEaBSYBuP+stB4iCRglASs1ACs1AP//ABcAAAYzBiECJgBaAAABBgBDdQAADrkAAf+htCUpCR4lASs1AAD//wAZAAAHVgdzAiYAOgAAAQcAdgGwAVIAE0ALASIFJgFJIh4JGCUBKzUAKzUAAAD//wAXAAAGMwYhAiYAWgAAAQcAdgEhAAAAC7YBTSklCR4lASs1AAAA//8AGQAAB1YHKQImADoAAAEHAGoBZAFSABm2AgEuBSYCAbj//rQZKwkYJQErNTUAKzU1AP//ABcAAAYzBdcCJgBaAAABBwBqANMAAAAQsQIBuP//tCAyCR4lASs1NQAA//8AAAAABIcHcwImADwAAAEHAEP/kgFSABW0AQoFJgG4/6C0DhIHAiUBKzUAKzUA//8AAv4UBBQGIQImAFwAAAEHAEP/YQAAAA65AAH/p7QdIQAKJQErNQABAFIB1QOuAnUAAwAoQBkAAwQFALUBAYoBAS8BXwG/Ac8B7wH/AQYBAC9dXV0zERIBOTkxMBM1IRVSA1wB1aCgAAAAAAEAUgHVB64CdQADAChAGQADBAUAtQEBigEBLwFfAb8BzwHvAf8BBgEAL11dXTMREgE5OTEwEzUhFVIHXAHVoKAAAAAAAQBSAdUHrgJ1AAMAKEAZAAMEBQC1AQGKAQEvAV8BvwHPAe8B/wEGAQAvXV1dMxESATk5MTATNSEVUgdcAdWgoAAAAAAC//z+OQNO/8sAAwAHAEtALgQACQUBAQiXAqcCxwLXAucCBQIQASABYAGwAeAB8AEGAZgFqAXIBdgF6AUFBQa4/8CzDxNIBgAvKzNdL10zXREBMxEzETMyMTABITUhNSE1IQNO/K4DUvyuA1L+OYOMgwAAAAEAGQPBAU4FtgAHABK2AQUICQAEAwA/zRESATk5MTATJzYSNzMGByUMFmI4hUIlA8EWWgEMef73AAAAAAEAGQPBAU4FtgAGABK2BAEHCAQGAwA/xhESATk5MTABFwYDIxI3AT8PNHyFRiAFthbH/ugBHdgAAQA//vgBdQDuAAYAHkARBAEHCAQvBj8Grwa/Bs8GBQYAL13GERIBOTkxMCUXBgMjEjcBZg8wgIZDJO4Xuv7bAQPzAAABABkDwQFQBbYABwAStgYCCAkDBwMAP80REgE5OTEwExYXIyYCJzfpJUKFLW0YDgW2+/peARxlFgAAAAACABkDwQLHBbYABwAPABpADAUBDQkEEBEACAQMAwA/M80yERIBFzkxMAEnNhI3MwIHISc2EjczAgcBng8bai6FQyT9xQwUZjaDQyQDwRZqARVg/vfsFlMBGnL+9+wAAAACABkDwQLHBbYABgAOABpADAgLAQQEDxALBA4GAwA/M8YyERIBFzkxMAEXBgMjEjchFwYDIzYSNwE9DzF/g0EjAjsPMX+IGkINBbYWwv7jAQjtFsL+42QBNF0AAAIAG/74AssA7gAGAA4AJ0AXCAsBBAQPEAsEBA4vBj8Grwa/Bs8GBQYAL10zMy8zERIBFzkxMCUXBgMjNjchFwYDIzYSNwFCDjCAhUElAjsPMICIGz4Q7he6/tv6/Be6/ttoASZoAAAAAQCFAAADlgYUAAsAOUAcCQICCAMKAQEHBAAEAwUEDA0BBAQKBwcDCAADEgA/PxI5LzMzETMREgEXOREzMxEzETMzETMxMAElEyMTBTUFAzMDJQOW/qEzzDH+tgFKMcwzAV8D3x/8AgP+H7IeAaH+Xx4AAAABAHsAAAOeBhQAFQBpQDgQBAQVDwUFCgwHFQoRFAADAwMJCw4DBhMCAgYKBwQWFxQLEQ4ODwMGBgAPCR8JAgkOCQ4FDwAFEgA/PxI5OS8vXTMzETMROS8zMzMREgEXOREzERczMxEXMxEzETMRMxEzETMRMzEwASUVJRMjEwU1BQMTBTUFAzMDJRUlEwI/AV/+oTLPMf6oAVgrK/6oAVgxzzIBX/6hKwHuHq4d/oUBex2uHgEkARUfrh4BfP6EHq4f/usAAQCeAe4CZAPpAAsAEbUABgwNAwkAL80REgE5OTEwEzQ2MzIWFRQGIyImnnRvbnV3bG51Aux6g4N6eoSFAAAAAAMAk//jBcEA+AALABYAIgAmQBQdFxEMBgAGIyQaDgMJA31ZIBQJEwA/MzMrEQAzMxESARc5MTA3NDYzMhYVFAYjIiYlNDMyFhUUBiMiJiU0NjMyFhUUBiMiJpNBPD1ERD07QgIZfTxDRDs2RwIXQjo
[... base64-encoded binary data omitted: an opaque inlined binary asset (apparently an embedded webfont, judging by the repetitive byte patterns) carried inside the minified chunk; not human-readable and left out of this listing ...]
VkWDwUAXVkFGwA/KwAYPysREgA5EjkYLysRADMYL19eXSsREgEXOREzETMzETMRMxEzETMxMAEyNxUGIyImNTQ2MzI2NTQmIyM1ASE1IRUBFhYVFAYjIgYVFBYBuqycetC5xr67o462yXgBdv3XAyf+gdTj9O5sZ23+qkqkPIB0gHhwinpzfQFOmIP+sAq/ssPPKjgrMwABAGIAAAQpBh8AIQBeQDETBAsdFRwcECEaHRAEBCAdAyIjFiEAIWxZEw8AAQsDAAANHQ0Ha1kNAR4aHRpsWR0SAD8rEQAzGD8rERIAORgvX15dMysRADMREgEXOREzETMzETMRMxEzETMxMBMhNjY1NCYjIgYHJzYzMhYVFAYHMxUhBgYBFSEVITUBNyGgAe04MXpsXZVLYL/iwNwrM83+2RY0/n0C/vw5AWdg/ncDWlqbWWdyREN5rMesVqFbjyFD/jEJj5YBuH0AAAABAEj/7AQ7BbYAGwBDQCMbBhgBFgYSEhYLAxwdARZrWQEBCRkAGBkYaVkZAwkPa1kJEwA/KwAYPysRADMREjkYLysREgEXOREzETMzETMxMAERMzIWFhUUBCEgJzUWFjMyNjU0JiMjESM1IRUBy1ae9ob+yv7p/v2jZOJix8TAq/6hA40FEv51bsyI4PlPqDAwqJyLngIipKQAAAABAET/7AOPBEoAGQBDQCIZBRYKARQFEBQQGhsBFF1ZAQEIFwAWFxZdWRcPCA1dWQgWAD8rABg/KxEAMxESORgvKxESATk5ETMRMzMzETMxMAEVMzIWFRQGIyInNRYzMjY1NCYjIxEjNSEVAZYv2+/43fKEt72NmJqfw38C6gOy7763p7tHolZtbG1qAYOYmAABAEr/7ANYBUYAIQBQQCgJEQIWBwsLABEcHAAWAyIjDxEfHAEFB0ABCgQHBwpkWQcPFBleWRQWAD8rABg/KxEAMxEzGhgQzRESORI5ERIBFzkRMxEzETMRMxEzMTABNSM1NzczFSEVIRUUFhcWFhUUBiMiJzUWMzI2NTQmJyYmARe7vUdrAT3+wyo2qYTm0NiAsKyIfGOGYUkDBrhWSOr8jLxFQRI/kmqaqUWkWFhKPFQ2KYQAAAAAAgCu/hQEUAReAA4AGAA8QB4EDwAAAQoSARIZGgQOBwIPARsHFV1ZBxAOD11ZDhYAPysAGD8rABg/PxESORESATk5ETMRMxEzMzEwASMRMxczNjMyFhUUAgQHNSQANTQmIyIGFQFitJQYCHDUxOa7/qfaAQwBKJSBlYr+FAY2lqry1rv+07UNkyMBGNeOqLTJAAAAAAEBwf4UAmAGFAADABZACQABAQQFAgABGwA/PxESATkRMzEwASMRMwJgn5/+FAgA//8Auv4UA2gGFAAnA7v++QAAAAcDuwEIAAAAAAABAIX+FAOcBhQAEwBeQDITDwQIDAwBEQ0GCgoNDwMUFQcTABNsWQQADwABFQMLDwgQDxBsWcAPAQAPAA8NAgANGwA/PxI5OS8vXSsRADMRM19eXREzKxEAMxESARc5ETMRMzMzETMzETMxMBMhETMRIRUhFSEVIREjESE1ITUhhQE8nwE8/sQBPP7En/7EATz+xAM3At39I5P+lP0CAv6U/gAA//8Ak//jAZEFtgIGAAQAAP//AMcAAAoHB3MAJgAnAAAAJwA9BcMAAAEHAUwFmgFSAB60AyUFJgO4/+tADCciFhclAjQTGgApJSs1KzUAKzX//wDHAAAJSgYhACYAJwAAACcAXQXXAAABBwFMBWIAAAAXuQAD//5ADCciFhclAj4TGgApJSs1KzUAAAD//wBx/+wIXwYhACYARwAAACcAXQTsAAABBwFMBHsAAAAUQA4DAjcyJiclAlgjKg85JSs1KzUAAP//AMf+ewWmBbYAJgAvAAABBwAtBDEAAAALtgEaDhEFFSUBKzUAAAD//wDH/hQFpAXlACYALwAAAQcATQQxAAAADbcCARkOEQQhJQErNTUA//8Arv4UA4MGFAAmAE8AAAEHAE0CEAAAAA23AgFXDA8AHyUBKzU1AP//AMf+eweJBbYAJgAxAAABBwAtBhQAAAALtgFjHB8AIyUBKzUAAAD//wDH/hQHhwXlACYAMQAAAQcATQYUAAAADbcCAWMcHwAvJQErNTUA//8Arv4UBmUF5QAmAFEAAAEHAE0E8gAAAA23AgFTHSAUMCUBKzU1AP//AAAAAAUbB44CJgAkAAABBwFMAC8BbQATQAsCABoVBQYlAhgFJgArNQErNQAAAP//AF7/7APXBiECJgBEAAABBgFM4gAADrkAAv/ptDItExklASs1AAD//wADAAACtweOAiYALAAAAQcBTP7/AW0AE0ALARYFJgEDGBMGCyUBKzUAKzUAAAD///+vAAACYwYhAiYA8wAAAQcBTP6rAAAAC7YBARALAgMlASs1AAAA//8Aff/sBcMHjgImADIAAAEHAUwAwQFtABW0AiIFJgK4//+0JB8GACUBKzUAKzUA//8Acf/sBGgGIQImAFIAAAEGAUwMAAAOuQAC//60JB8HACUBKzUAAP//ALj/7AUfB44CJgA4AAABBwFMAI0BbQATQAsBHAUmAQAeGQgBJQErNQArNQAAAP//AKL/7AREBiECJgBYAAABBgFMGwAAC7YBBiEcFAolASs1AP//ALj/7AUfCAICJgA4AAABBwlMAvIBUgAbQA8DAgEhBSYDAgEFLSwIASUBKzU1NQArNTU1AAAA//8Aov/sBEQGsAImAFgAAAEHCUwCdQAAABBACQMCAQEwLxQKJQErNTU1AAD//wC4/+wFHwhKAiYAOAAAAQcIiALfAVIAJkAQAwIBICEwIUAhAyEFJgMCAbj/+rQkLggBJQErNTU1ACtxNTU1//8Aov/sBEQG+AImAFgAAAEHCIgCcwAAABBACQMCAQYnMRQKJQErNTU1AAD//wC4/+wFHwheAiYAOAAAAQcJSwLpAVIAJkAQAwIBICEwIUAhAyEFJgMCAbj//rQ6MwgBJQErNTU1ACtxNTU1//8Aov/sBEQHDAImAFgAAAEHCUsCbwAAABKyAwIBuP/8tD02FAolASs1NTX//wC4/+wFHwhKAiYAOAAAAQcIiQLhAVIAJkAQAwIBICEwIUAhAyEFJgMCAbj/77QuEggBJQErNTU1ACtxNTU1//8Aov/sBEQG+AImAFgAAAEHCIkCdQAAABKyAwIBuP/7tDEVFAolASs1NTUAAgBo/+wEEgReABQAGwBXQDURGQkDCwsYCQMcHQoZXlkJCgESDwofCgIPCi8KPwp/Co8KBRMDCgoGAAAOYVkAEAYVXVkGFgA/KwAYPysREgA5GC9fXl1xXl0rERIBFzkRMxEzMzEwATIAERAAIyICNTUhJiYjIgYHNTY2EzI2NyEWFgIC+AEY/vrfz/YC8AW0pViealugmoGWDv3RAogEXv7V/vr++P7HAQvkbbrDHy2eJyD8IaaTl6IAAP//AAAAAAUbCAICJgAkAAABBwlMAo8BUgAbQA8EAwIAIA4FBiUEAwIdBSYA
KzU1NQErNTU1AAAA//8AXv/sA9cGsAImAEQAAAEHCUwCSgAAABKyBAMCuP/ytDgmExklASs1NTX//wAAAAAFGwgCAiYAJAAAAQcJTwKPAAAADbcDAgAOFAUGJQErNTUA//8AXv/sA9cGsgImAEQAAAEHCU4CSgAAABCxAwK4//K0NTQTGSUBKzU1AAD////+AAAGkQa8AiYAiAAAAQcBTQGcAVIAILkAAv9VQBIXFgYHJQJ/F48XnxevFwQXBSYAK101ASs1AAD//wBe/+wGgQVqAiYAqAAAAQcBTQEZAAAADrkAA//XtD49ChclASs1AAEAff/sBb4FywAjAGxAPRIEBhgMAgYGIR0dHyMMBCQlBR8gH2xZAiAgCQAAI2lZDwAfAC8ArwC/AAUJAwAACRAQFWlZEAQJG2lZCRMAPysAGD8rERIAORgvX15dKxESADkYLzMrEQAzERIBFzkRMzMRMxEzETMzMTABIREzFSMRBgYjIAARNBIkMzIXByYjIAAREAAhMjc1ITUhNSEDQgH5g4N08J7+sv6StgFX6erKRsG4/vv+2gEaAQ2TjP7TAS3+vwME/s2S/vglJgGMAWPlAVa1VqBU/sT+7v7e/tIjkZKPAAIAcf4UBK4EXgAiAC4AgUBLHiEUEhktBxImIhANAQEiIR8HBS8wDQIEChUfIB9eWRIPIB8gLyADIQMgIBcEDg8KKl1ZChAEI11ZBEATFkgEQAoOSAQVFxxdWRcbAD8rABg/KysrABg/KwAYPxESOS9fXl0zKxEAMxESOTkREgEXOREzMxEzMxEzMxEzETMxMCU3IwYjIgIREBIzMhczNzMRFAczFSMGISInNRYzMjchNSE3JTI2NTU0JiMiBhUQA4sGCG/l1+3u1N95CxiPBHWTYf6Y8Jug9Z1T/s8BbAT+xauSmKmMlR+HpgEiAQsBBwEqppL7pCgkkvxGplZmkj20r8Ah3MjQzP5oAP//AH3/7AU7B3MCJgAqAAABBwFMAPgBUgATQAsBJgUmAXooIwgCJQErNQArNQAAAP//AHH+FAQ9BiECJgBKAAABBgFMFwAAC7YCHjUwFB0lASs1AP//AMcAAAT0B3MCJgAuAAABBwFMAEwBUgAVtAEXBSYBuP/NtBkUBgAlASs1ACs1AP//AK4AAAQzB5wCJgBOAAABBwFMAAIBewAWuQAB//BACRwXDAYlARoCJgArNQErNf//AH3+PQXDBc0CJgAyAAABBwFRAn0AAAALtgIpIh4GACUBKzUAAAD//wBx/j0EaAReAiYAUgAAAQcBUQG0AAAAC7YCFCIeBwAlASs1AAAA//8Aff49BcMGvAImADIAAAAnAU0AxQFSAQcBUQJ9AAAAJUAbAn8bjxufG68bBBsFJgMpJiIGACUCABsaBgAlKzUrNQArXTUA//8Acf49BGgFagImAFIAAAAmAU0QAAEHAVEBtAAAABa3AxQmIQcAJQK4//+0GxoHACUrNSs1AAD//wBI/+wEOwdzAiYDsgAAAQcBTP/OAVIAFrkAAf/1QAknIhgZJQElBSYAKzUBKzX//wAd/hQDtgYhAiYC5wAAAQcBTP9yAAAADrkAAf/0tCYhFxglASs1//8AxwAACgcFtgAmACcAAAEHAD0FwwAAAAu2AjQTGgAcJQErNQAAAP//AMcAAAlKBbYAJgAnAAABBwBdBdcAAAALtgI+ExoAHCUBKzUAAAD//wBx/+wIXwYUACYARwAAAQcAXQTsAAAAC7YCWCMqDywlASs1AAAA//8Aff/sBTsHcwImACoAAAEHAHYBZAFSABNACwElBSYB2SUhCAIlASs1ACs1AAAA//8Acf4UBD0GIQImAEoAAAEGAHZKAAALtgJEMi4UHSUBKzUAAAEAx//uBskFtgAZAFxANAUBAQIJBhgSDw8YAgMaGxAPBQBpWdgFAToFAQkFAQ8ABaAFAhIDBQUCBwMDAhIVDGlZFRMAPysAGD8/MxI5L19eXV5dXV0rABg/ERIBFzkRMxEzMxEzETMxMAERIxEzESERMxEUFjMyNjURMxEUBiMiJjURAX+4uAJeuV5gW2G5yLOuwwKq/VYFtv2WAmr7ml1mZ14C+P0Ipb+8qgFWAAAAAgDH/hQE2QXNAA4AGAA7QB4LDwcHCAMSCBIZGgsGAAYPalkGFgkDCBsAFWlZAAQAPysAGD8/PysREgA5ERIBOTkRMxEzETMzMTABMhIVEAAFESMRMxczNjYBJAARNCYjIgYVAx3S6v5L/lu4kR8KTMv+5wFKAViYjcG8Bc3++uz+tv4Gq/4oB6TGcWr6y4oBqgENpq7j8v//AMcAAAVOB3MCJgAxAAABBwBDAGYBUgAVtAEVBSYBuP+ttBkdCRMlASs1ACs1AP//AK4AAARMBiECJgBRAAABBgBD6gAADrkAAf++tBoeChQlASs1AAD//wAAAAAFGwdzAiYAJAAAAQcDcwTdAVIAGrEDArj/mUAKHA4FBiUDAhkFJgArNTUBKzU1//8AXv/sA9cGIQImAEQAAAEHA3MEmAAAABCxAwK4/4u0NCYTGSUBKzU1AAD//wAAAAAFGwc+AiYAJAAAAQcE8QKPAVIAE0ALAgAZEQUGJQIZBSYAKzUBKzUAAAD//wBe/+wD1wXsAiYARAAAAQcE8QJUAAAADrkAAv/8tDEpExklASs1//8AoAAAA/gHcwImACgAAAEHA3MExQFSABm2AgEXBSYCAbj/sLQbFQILJQErNTUAKzU1AP//AHH/7AQbBiECJgBIAAABBwNzBLgAAAAQsQMCuP+9tCslAwolASs1NQAA//8AxwAAA/gHPgImACgAAAEHBPECYgFSABNACwEXBSYBARcPAgslASs1ACs1AAAA//8Acf/sBBsF7AImAEgAAAEHBPECVgAAAAu2Ag8nHwMKJQErNQAAAP///4UAAAJkB3MCJgAsAAABBwNzA6oBUgAZtgIBFwUmAgG4/5u0GxUGCyUBKzU1ACs1NQD///8xAAACEAYhAiYA8wAAAQcDcwNWAAAAELECAbj/mbQTDQIDJQErNTUAAP//AB0AAAKZBz4CJgAsAAABBwTxAVwBUgATQAsBFwUmAQEXDwYLJQErNQArNQAAAP///8kAAAJFBewCJgDzAAABBwTxAQgAAAAOuQAB//+0DwcCAyUBKzX//wB9/+wFwwdzAiYAMgAAAQcDcwVxAVIAGbYDAiMFJgMCuP+ctCchBgAlASs1NQArNTUA//8Acf/sBGgGIQImAFIAAAEHA3MExwAAABCxAwK4/6W0JyEHACUBKzU1AAD//wB9/+wFwwc+AiYAMgAAAQcE8QMhAVIAE0ALAiMFJgIAIxsGACUBKzUAKzUAAAD//wBx/+wEaAXsAiYAUgAAAQcE8QJtAAAAC7YCACMbBwAlASs1AAAA//8ApgAABNsHcwImADUAAAEHA3MEywFSABqxAwK4/3pACiQWDBAlAwIhBSYAKzU1ASs1Nf//ACMAAAMvBiECJgBVAAABBwNzBEgAAAAQsQIBuP+ktCEbDAIlASs1NQAA//8
AxwAABNsHPgImADUAAAEHBPECfQFSABa5AAL/4UAJIRkMECUCIQUmACs1ASs1//8ArgAAAy8F7AImAFUAAAEHBPEB8AAAAAu2AQAdFQwCJQErNQAAAP//ALj/7AUfB3MCJgA4AAABBwNzBVgBUgAZtgIBHQUmAgG4/7e0IRsIASUBKzU1ACs1NQD//wCi/+wERAYhAiYAWAAAAQcDcwTHAAAAELECAbj/n7QkHhQKJQErNTUAAP//ALj/7AUfBz4CJgA4AAABBwTxAu4BUgATQAsBHQUmAQEdFQgBJQErNQArNQAAAP//AKL/7AREBewCJgBYAAABBwTxAnkAAAALtgEFIBgUCiUBKzUAAAD//wBO/+wERgXLAgYBsQAAAAEAFP4UA7YEXgAoAExAKRMQAxYjIwMnChwFKSoTJygoJ15ZACgBDgMoKBoNDQZdWQ0QGiBdWRobAD8rABg/KxESADkYL19eXSsREgA5ERIBFzkRMxEzMzEwATI2NTQmIyIGByc2NjMyFhUUBgcWFhUUBgYjIic1FhYzMjY1NCYjIzUBi5CqnoU9flY9Wp9f0/KCgqWkhvmf7pZb0GGiwNDOoQHTj3Fxhx4ojysfxa19tCwqzZaQ4nxMpCsvuZudqY8AAP//AMcAAAUlB3MCJgArAAABBwFMAJgBUgATQAsBFgUmAQAYEwYLJQErNQArNQAAAP//AK4AAARMB5wCJgBLAAABBwFMAE4BewATQAsBLyMeChYlASECJgArNQErNQAAAAABAMf+FAUzBc0AEgAzQBkHAwMEDg8EDxMUBwQLBQMEEg8bCwBpWQsEAD8rABg/Pz8REjkREgE5OREzETMRMzEwASARESMRMxczNjYzIBERIxE0JgMj/ly4kR8KQvl9Afq4qAUt/jX8ngW4vF10/eP6ZAWcx7YAAP//AHH+FAUnBhQCBgRFAAAAAgB3/+wE5wW2ABsAJQBMQCcIAxQZHBIZFgYDCyIiAxYSBCYnCBQkAAAka1kAAA4EFwMOH2lZDhMAPysAGD8zEjkvKxESADk5ERIBFzkRMxEzETMRMxEzETMxMAEyNjU1MxUQBxYWFRQAISIkJjUQJSYRNTMVFBYDFBYzMjY1ECEgAq6XoLnsl57+z/70pf7/jQEx57ig4Ly5ub7+h/6NA8Ool7S0/thjMNGb5f72eeOTAUFdXwEqtLSXqP4Yp6mqpgFKAAIAcf/sBGgGFAAcACgATEAnCQMUGh0SGhcGAwwjIwMXEgQpKgkUJgAAJl1ZAAAPBBgADyBdWQ8WAD8rABg/MxI5LysREgA5ORESARc5ETMRMxEzETMRMxEzMTABMjY1ETMRFAYHFhYVFAAjIgA1ECUmJjURMxEUFgMUFjMyNjU0JiMiBgJtiXm0XmmGhv7v7+T+7QEKaVy0ebiioJ2kp52dogPhk54BAv74nrQnM9me7/7yARXoAUtiJ7aZAQj+/p6T/giwuLaysrGy//8ATv5qBEQFtgImAD0AAAEHA30CwQAAAAu2AQATEwkJJQErNQAAAP//AFD+agNzBEoCBgYWAAD//wAAAAAFGwc3AiYAJAAAAQcBTwGFAVIAE0ALAgAOFAUGJQIXBSYAKzUBKzUAAAD//wBe/+wD1wXlAiYARAAAAQcBTwE1AAAADrkAAv/ntCYsExklASs1//8Ax/4UA/gFtgImACgAAAEHAHoBewAAAAu2AQMSDAILJQErNQAAAP//AHH+FAQbBF4CJgBIAAABBwB6AW8AAAALtgIQIhwDCiUBKzUAAAD//wB9/+wFwwgdAiYAMgAAAQcJTAMhAW0AG0APBAMCJwUmBAMCADMyBgAlASs1NTUAKzU1NQAAAP//AHH/7ARoBrACJgBSAAABBwlMAm0AAAAQQAkEAwIAMzIHACUBKzU1NQAA//8Aff/sBcMIHQImADIAAAEHCU0DHwFtACBADAMCsCHAIQIhBSYDArj//7QhLQYAJQErNTUAK101NQAA//8Acf/sBGgGsAImAFIAAAEHCU0CagAAABCxAwK4//60IS0HACUBKzU1AAD//wB9/+wFwwc3AiYAMgAAAQcBTwIXAVIAE0ALAiEFJgIAGB4GACUBKzUAKzUAAAD//wBx/+wEaAXlAiYAUgAAAQcBTwFiAAAADrkAAv//tBgeBwAlASs1//8Aff/sBcMIAgImADIAAAEHCU8DHwAAABCxAwK4//60JyYGACUBKzU1AAD//wBx/+wEaAayAiYAUgAAAQcJTgJtAAAADbcDAgAnJgcAJQErNTUA//8AAAAABIcGvAImADwAAAEHAU3/6gFSAB1AFAF/DI8MnwyvDAQMBSYBAQwLBwIlASs1ACtdNQD//wAC/hQEFAVqAiYAXAAAAQYBTbEAAAu2AQEbGgAKJQErNQAAAgAO/8MC+AYUABIAHABQQCkQEgIbGxIHFhIWHR4bDRkCEgQPCgQZZFkPBB8EAgkDBAoAAAoTZFkKFgA/KwAYPxDEX15dKwAYEMYROTkROTkREgE5OREzETMRMxEzMTATMxE2MzIWFRQGIyImJwYHJzY3BTI2NTQmIyIHFq60P018jo6DXoYmHieKSlYBNzwzPzY+PwcGFPuqGYltcoNHPD1vPchtvj0tNTYxpAAAAAACAK7/wwXhBF4AIgAsAG9AOw0PGxcXGAArKw8FJiYPGAMtLgAPAisKKQwbGB8MCAIpZFkPAh8CAgkDAggZDxgVHxNdWR8QCCNkWQgWAD8rABg/KwAYPz8QxF9eXSsAGBDGERI5ERI5ORI5ORESARc5ETMRMxEzETMRMxEzMTABNjMyFhUUBiMiJwYHJzY3ETQmIyIGFREjETMXMzY2MzIWFRMyNjU0JiMiBxYETD9MfI6LhbVTHiiJSlZ3f6mZtJEbCjO4b8rEgzQ6PjY+PwcBvhmJbXKDgzlzPchtAYmGg7vT/ccESpZRWcTP/aw1NTU2MaQAAgAd/8MDBgVGABoAJABtQDgIHgEYGBoGCiMjGg8eGh4lJgoaDCMVIRcSDCFkWQ8MHwwCCQMMEgMEBkAACQYJZFkGDxIbZFkSFgA/KwAYPysRADMaGBDNMxDEX15dKwAYEMYROTkROTkREgE5OREzETMRMzMRMxEzETMxMBMjNTc3MxUhFSERNjMyFhUUBiMiJicGByc2NwUyNjU0JiMiBxa8m51IawE9/sM/THyOi4VehiYeKIlLVAE4NDo+Nj4/BwO+Vkjq/Iz+ABmJbXKDRzw5cz3Lar41NTU2MaQAAAADAHH/7AdWBhQAHQAnADMAVEAsMQMPJiYMKxEJFyAgCSsDBDQ1CRIcAwAGDQAjLgYuXVkUBhAeKAAoXVkaABYAPzIrEQAzGD8zKxEAMxg/ERIXORESARc5ETMRMxEzMxEzETMxMAUiAhEQEjMyFzMmJjURMxEUBzM2MzISERACIyADAiUgETQmIyIGFRAhMjY1NCYjIgYVFBYCVuv67dfddw0ECbQKCm/l2ez97P7mcnICAAEtkpemkP4fkZ6XpouYmxQBIgEVAQ0BLqIahDkBgf6GcXGk/t
X+9P7s/tsBAP8AlQGm0NC84P5W4cnivN3NzNIAAwBx/hQHVgReAB4AKAA0AFNAKyEXCSwNDScRDgMyMg4XAzU2EhwJAxQaDhspHxofXVkAGhAvJBQkXVkGFBYAPzMrEQAzGD8zKxEAMxg/ERIXORESARc5ETMRMzMzETMzETMxMAEyEhEQAiMiJyMXFhURIxE0NyMGIyICERASMyATNjYFIBEUFjMyNjUQISIGFRQWMzI2NTQmBXHr+u3Y3nYMBAi0Cgpr6dft/ewBGXI6zv1r/tORmKWQAeGQn5Wpi5ibBF7+3v7r/vP+0qEjS2n+XgGccHGjASoBDQEUASX/AIR8lf5a0c+93wGq4Mrfv93NzNIAAAADAAD/ZgUbBhQADwAWABkAZEA3CwAZARYFCBgQDhMBEQIRExAIBwYaGxMLnw8BDw8fDy8PAwkDDwwHCwUJFglpWRgWFgsMAwMLEgA/Mz8SOS8zKxEAMxgQxhDGX15dcRI5ERIBFzkRMxEzETMRMzMRMzMyMTABAwEjAyEDIxMjAyMBMxc3ARMmJwYHAwEDMwP+mAG1v7D+0dGJ046uugI7pj5W/rqeKBoeIaYBbFy/BhT+TvueAcX9oQJf/jsFvJ/3/FQBx3FeeGP+RQEJ/vcAAgB9/2YEzwYUAB4AJgBkQDgMAyIXERQHJAEdAwYGHSUkFBMXBycoBiQfCRQBDxsTJx5AIilIHkAJDUgeGxsfaVkbBA8JaVkPEwA/KwAYPysAGBDGKysQxhESOTkREjk5ERIBFzkRMxEzETMRMxEzETMxMAEHFhcHJicBFjMyNxUGBiMiJwcjNyYCETQSJDMyFzcHIgAREBcBJgR5Jz1ASkkj/npJW5zDXaxwX2I1iUOvtacBP9hOSh237P7y3gF/KAYUcBAfnCAL+54VOqAiGRSaw1IBYgEA4gFUuApT6/7E/u7+iY0ESggAAAACAHH+VgPVBhQAGwAiAFxAMAADEhUMAxwVEBMHHgEaAwYGGh8eExUGIyQGHiAJARMYDhIbABggYVkYEA4JYVkOFgA/KwAYPysAGD8vERI5ORESOTkREgEXOREzETMRMxEzETMRMxEzETMxMAEDFhcHJicBFjMyNxUGIyInAyMTJhEQADMzFxMBFBcBIyIGA9WmOSs3PyP+6TlIkYxyqV1dmomuxgEL9x4dnv3fTAEACKaeBhT+MhATlhcI/PIXQKA7Gv5QAemQAVABFAErAgG4/A3GZwLN0QABABQAAAP+BbYADQA9QB8NCwQICAELCgYLAw4PBw0ADWxZBAAACwIDCwhpWQsSAD8rABg/EjkvMysRADMREgEXOREzMxEzETMxMBMzETMRIRUhESEVIREjFLO4AVz+pAJ//MmzAzkCff2Dkf38pAKoAAAAAAIAFP9mBFwGFAAQABMAREAlEhEGBgsHAwABBwoNBhQVDxAfEAIJAxAOCgcSBBMNDg1pWQEOAwA/MysRADMzGD/EEMZfXl0REgEXOREzMxEzMzEwAQczFSMBESMRAyMBESE1ITcBEyMEKS1gqv7kuc+PAV7+NwNZLf78jY0GFF6i/Zz9UAEj/kMC8gK8ol790QEvAAABAGj+FAOHBF4AMgBRQCojCAgwESkeMBcXAB4DMzQRIQAaHhcsKTAhJl1ZIRAAFF5ZABYKBF1ZChsAPysAGD8rABg/KxESADkREjkREjkREgEXOREzETMzETMRMzEwBRYXFjMyNjcVBiMiJicmJyYnNRYzMjY1NCYnLgI1NDYzMhcHJiMiBhUUFhceAhUUBgHhJRcitSdZE0Zgo6ofG39FLLWoiHx3mJt+O9zAu6M9p4ZwdGS3iYM+0hI2YqwSCZQdlqyOIhIZpFhYSkFaOjxVakyHnEqPRkc+PE9GM1huTZOnAAABAFD+FAOsBEoAGABCQCMJGBgUFAAVEgQZGhgVFhYVZFkWDxMAEhIAZFkSFQsGXVkLGwA/KwAYPysREgA5GD8rERIAORESARc5ETMRMzEwJRYWFxYWMzI3FQYjIiYnJiYjIzUBITUhFQEfcIQkF3NkRkE7WKe4MRptakgCTv3VAvGLFJOOW1EXlBmQsmFJdwNHjIcAAQAEAAADTAXLABUALUAWFQARBAQACwMWFxQBAQ4AEg4HaVkOBAA/KwAYPxI5LzMREgEXOREzETMxMDMRNjY1NCYjIgYHJzY2MzIWFRQCBxH+tNeKgEqyO0RLzmrT8tDGApM34pFzeTgskzY90bao/utc/dUAAAAAAQAZAAADMwReABMALUAWEwAPBAQACgMUFRIBAQwAFQwGXVkMEAA/KwAYPxI5LzMREgEXOREzETMxMDMRNjY1NCMiBgcnNjMyFhUUAgcV/rPK/EacQUOe0MzgyLkBJzfhkvg1MIhyy7ym/u5WyQAAAAMAHwAABMUFtgATACAAKQCKQE0LDAwlARMhFRkZAxMIJQ8dHSUXEwQqKxgBAgFpWRUNAgESBAICEwsUISEUa1k4IQGaIQFpIQEPIR8hAgkDISETBAQpa1kEAxMZa1kTEgA/KwAYPysREgA5GC9fXl1dXXErERIAORI5GC9fXl0zKxEAMxESARc5ETMRMxEzMxEzMxEzETMRMzEwEyM1MxEhIAQVFAYHFRYWFRQEIyETFSEVIRUhMjY1NCYjJSEyNjU0JiMjx6ioAaEBJgEFjoipn/708P3+uAEv/tEBJ7CqtLT+5wEOrJyrufIBXqADuK+7gqkZCh2wkcTcAq6woMKIioN9mm6BeGoAAAACABT/7AXFBbYAFAAdAE9AJwMFDQsSGxsPCwEFBRMYCxgeHwQaDQ4NaVkBEg4OCBQQAwgVaVkIEwA/KwAYPzMSOS8zMysRADMzERIBOTkRMzMRMxEzMxEzETMRMzEwAREzFSMVFAAhIAA1NSM1MxEzESERATI2NTUhFRQWBR+mpv7S/vT+9/7cpKS5Avf+iLXD/Qm/Bbb9v6DR+v7iASH7zaACQf2/AkH61cK30dOzxAAAAP//AAAAAATbBbYCBgFpAAAAAwDH/2YD+AYUABMAFwAbAJtAVwsHAxQYGBAMDwkZCBoFFQESAwQEEhYVGhkPDhAJHB0AEwEhAxMRQA4QCBsUG2lZBdgUAToUAQkUAQ8AFKAUAhIDFBQQEQQXERdpWQERAwkYEBhpWQwQEgA/MysRADMYPzMrEQAzERI5GC9fXl1eXV1dMysRADMYEMYaEM5fXl0REgEXOREzETMRMxEzETMRMxEzETMRMzMxMAEHMxUjAzMVIQMhFSEHIzcjESE3ATMTIREzEyMDrBlljnTd/vqDAa7+KSeFJ9UCRxn+WPRy/ppGhcsGFF6i/jig/faimpoFtl79OAHI+44CCgAAAAQAcf5WBBsGFAAdACMAJwArAJNATwADChcXDygDHAERKhArJyImIwsIDyQkCCMiKyoBAwgsLQAqKBMmIR4BCxoGECcoISEoXlkZIQEDDyEBEAYhIRoGCQAGHl1ZBhAaE2FZGhYAPysAGD8rABg/ERI5L19eXV9dKxESADk5ERI5ORESORESORgvE
RIBFzkRMxEzETMRMxEzETMRMxEzETMRMxEzMTATEyYREAAzMhcTMwMWFhUVIQMWMzI2NxUGBiMiJwMBIgYHMxMTJicDBRYXE56q1wEG3x87nImocX3+Uns+WViealugbWtalQEtgZYO5WvfBGJa/o0EVGD+VgHsjwFMAQgBOQgBvv4bOeifbf6eGx8tnicgHP5OBXWmlAE2/sq4T/75j7RgARQAAAAB/2D+ewIbBbYAFQA/QB8CERMKCA8TEwwICBYXEgoLCmlZDwsLAA0DAAVpWQAiAD8rABg/EjkvMysRADMREgE5ETMzETMRMxEzMjEwAyInNRYzMjY1ESM1MxEzETMVIxEUBgheOkdNZGSoqLmmpsX+exubFHlyAnugApP9baD9lMbWAAAC/4/+FAH8BeUAFQAhAFlAMAIREwoIDxMTDAgcCBYDIiMZH2NZYBkBDxkBDAMZDRIKCwpeWQ8LCyINDwAFXVkAGwA/KwAYPxI5LzMrEQAzGBDEX15dXSsREgEXOREzMxEzETMRMzIxMBMiJzUWMzI2NREjNTMRMxEzFSMRFAYDNDYzMhYVFAYjIiYtXkBFQ05Jmpq0mpqdJT0tKj8/Ki09/hQZkRRVVwKmkQG9/kOR/WCkpAdfPDY2PDs4OAAAAAACAH3+FAY3BcsADgAtAElAJikvAxkjESAKAy0ZLS4vER8VHCEDHABpWRwEFQZpWRUTKyZpWSsbAD8rABg/KwAYPysAGD8REjk5ERIBOTkRFzMzETMRMzEwASICERASMzI2NjURNCYmATQ3IwYGIyIkAjUQACEyFhczNzMRFBYzMjcVBiMgEQLjx9/eyp7EW1zGASIICjnlpbv+7JEBRwEjkO47Ch+ROkcyKi9D/t0FK/7J/uf+6/7FXLagATyhtlv6/jZdYHC2AVblAWEBjW5jvPmoV1EVnBsBQgAAAgBx/hQFCAReAAwAKABLQCckKgoVAygeGw8PKBUDKSoQGhIYHA8YB11ZGBASAF1ZEhYmIV1ZJhsAPysAGD8rABg/KwAYPxESOTkREgEXOREzMxEzETMRMzEwJTI2NzU0JiMiBhUUFgU0NyMGIyICERASMzIXMzczERQWMzI3FQYjIhECUqGUBJiljZaVAckKDHPl1Orv1eF1CBuPLThAJipl8IGwyyXjxd7MydWYbjynASwBCwEMAS+qlvsjcFUWiSEBVgAAAAACABQAAATbBbYAEAAZAEtAJQ4LDRUEAhEBAQYCCxUCFRobDQAEBQRrWREFBQcPAhIHGWlZBwMAPysAGD8zEjkvMysRADMzERIBOTkRMxEzMxEzETMRMxEzMTABESMRIzUzESEgBBUQBQEjASUzMjY1NCYjIwF/uLOzAZMBEAEF/tsBkdf+nv7d27KkprrRAlz9pAJcnAK+z9D+3WX9cQJcnIyKin8AAAABABQAAAMvBF4AFwBSQCwFAwoWAgIHAxAAAwMYGQoDDgEFBgVeWRYABhAGAgsDBgYDCA8DFQ4TYlkOEAA/KwAYPz8SOS9fXl0zKxEAMxESORESARc5ETMzETMzETMxMAEhESMRIzUzETMXMzY2MzIXByYjIgYHMwJk/v60mpqUFAg/rGVJOBY9OnWzFP4B/P4EAfyRAb3JbXAMpg6mhwAAAgAAAAAEhwW2ABEAFABMQCcDFhATCRQJCgYECg8NBRUWCAsKEAcUDQ4NaVkEAA4OEhIKAhADChIAPz8zEjkSOS8zMysRADMzERI5ORESARc5ETMzETMyETMxMAEhEzMDMxUjAREjEQEjNTMDMwETIQFSAeOMxo970f7+uf78z3mNyQF7nf7FBLABBv76oP4n/ckCLwHhoAEG/TEBKQAAAAIAAv4UBBQESgAaACEAVUAsCSMdBwwKBAETCgcHGh4DEwUiIyEEGhUeDQECAV5ZCgYCAhEIBA8RFl1ZERsAPysAGD8zEjkvMzMrEQAzMxg/EjkREgEXOREzETMzETMRMxEzMTATIzUzAzMTIRMzAzMVIwEGBiMiJzUWMzI2PwI2EyEWFhfNuX+RwYkBg4PCiXWs/udFvoxLSjJGVngmOVgZb/7nPz0NAk6RAWv+lQFr/pWR/Rq2nhGPDF9jkrJqATalskkAAAD//wCm/+wEHwRcAQ8ARAR9BEjAAAAJswEAFhYAPzU1AAACAHH/7AQ9BF4AEAAdADxAHhsDDBQJDgMOHh8JDwAGCg8NFQYYXVkGEAARXVkAFgA/KwAYPysAGD8/ERI5ORESATk5ETMzMxEzMTAFIgIREBIzMhczNzMRIycjBicyNjU1NCYjIgYVFBYCM9bs7dfddwgdj5EbCHPGpJeZpIuYlxQBKAEPAQ0BLqKO+7aTp5WzzCHlw93NzNIAAP//AK//7AR7BF4BDwRABOwESsAAAAmzAQAGFgA/NTUAAAIArv/sBHsGHwAdACoARUAkDRoaHRMoKAUdAyssGg0WEB0VAgdhWQIBEB5dWRAQFiVdWRYWAD8rABg/KwAYPysAGD8REjk5ERIBFzkRMxEzETMxMBMQMzIXFSYjIgYVFRQHMzYzMhIREAIjIiYnIwYHIwEiBhUVFBYzMjY1NCau+EVCLzsvMgoKb+XZ7PDVb643Dh8GgQHqppCTp5SRkgUAAR8blRQ2QXJxcaT+1f70/vD+11BPeBMDx7zgCOHB2c3Q0AABAET/7ANmBF4AFwAoQBQMFxIFFwUYGQ8IYVkPEBUCYVkVFgA/KwAYPysREgE5OREzETMxMDcWMzI2NTQmIyIGByc2NjMyABEQACMiJ1aMi6WaoKI3hjI3MaBe7QEG/vXxonLHQNPPxtQdGZYZIv7b/vL+6f7YOwACAGL/nAPpBF4AHQAnAF1AMQ8bBRQJIAcbJSUHCQMoKRYHGAIeBAAYHl1ZDxgfGAIJAxgYAAwMEmFZDBAAIl1ZABYAPysAGD8rERIAORgvX15dKwAYEMYRORE5ORESARc5ETMRMxEzMxEzMTAFIicGByc2NyY1EAAzMhYXByYjIBEUFzYzMhYVFAYDIgcWMzI2NTQmAoHNfSsgiitATgEL91SbMjiLYv68FaDCjqvHfpd3UpdXaVEUYlFhP35uhdMBFAErIhmWNP5gaU6eh3OFnQGHlF5NPDA5AAAAAgBx/hQFJwYUAB8ALABNQCgGLioUHSMKABoODgoUAy0uGg8RFx4AFyddWRcQESBdWREWCANdWQgbAD8rABg/KwAYPysAGD8REjk5ERIBFzkRMzMRMzMRMxEzMTAFFBYzMjcVBiMgETU0NjcjBiMiAhEQEjMyFzMmJjURMwEyNjU1NCYjIgYVFBYEPTFESSwvbf7+CgMNdt7X7e3X3XcNAwq0/hOkl5mki5iXk2pbFokhAVaCGHcSoQEqAQ0BDQEuohR5FQG2+m2zzCHlw93NzNIAAAACAHH/7AUIBh8AHAApAEtAJwUrJxMcIAsZDQ0gEwMqKxkOEBYMFQIHYVkCARYkXVkWEBAdXVkQFgA/KwAYPysAGD8rABg/ERI5ORESARc5ETMzETMRMxEzMTABEDMyFxUmIyIG
FREjJyMGIyICERASMzIXMyYmNQEyNjU1NCYjIgYVFBYDifhIPy87LzKRGwhz49bs7dfddw0DCv7HpJeZpIuYlwUAAR8blRQ2Qfr0k6cBKAEPAQ0BLqIUeRX8I7PMIeXD3c3M0gAAAP//AGj/7AQSBF4ARwBIBIMAAMAAQAAAAAACAGj/7AQSBF4AFAAbAFdANREZCQMLCxgJAxwdChleWQkKARIPCh8KAg8KLwo/Cn8KjwoFEwMKCgYAAA5hWQAQBhVdWQYWAD8rABg/KxESADkYL19eXXFeXSsREgEXOREzETMzMTABMgAREAAjIgI1NSEmJiMiBgc1NjYTMjY3IRYWAgL4ARj++t/P9gLwBbSlWJ5qW6CagZYO/dECiARe/tX++v74/scBC+RtusMfLZ4nIPwhppOXogAAAAIAaP/sBhsEXgAjACsAZUA0AC0VKQ4cDwUoKA8OAywtAiFhWQICCh0FKAQpHA8OBB0pDh0OHQ4KGRkSYVkZEAokXVkKFgA/KwAYPysREgA5ORgvLxEzETMSOTkREjk5ERI5LysREgEXOREzETMRMzMRMzEwAQYjIgMHFxUQACMiAjU1JSYmIyIGBzU2NjMyBBc3FxYWMzI3ATI2NjUFFhYGG3N7t0sbAv7638/2AuQcq4tYnmpboG3EAQosriMXPi5SVPw5XIdG/c8LhwGqTgECBhUW/vj+xwEL5BHIhYwfLZ4nIMW0L3pVSzf+TlmppJiCjP//AFj/7AOYBF4CBgGCAAD//wBE/+wDjwReAgYB0QAAAAEARP/sBTMEXgAwAHtARRUyHB0dAgglDRoaAiArKwIvJQQxMhgSYVkYGCMKHC8wMC9dWUUwARkwAQgw6DACEA8wARQDMDAjCgoEXVkKECMoXVkjFgA/KwAYPysREgA5GC9fXl1eXV1dKxESADkREjkYLysREgEXOREzETMRMxEzETMRMxEzMTABIDU0IyIGByc2MzIWFzcXFhYzMjcXBgYjIicGBxUWFhUUBiMiJzUWMzI2NTQmIyM1AYcBN/lPiF8/q9SRyCpvIhc+Lk9XJzF9P6hML4d9dvrb8oS3vY2Ymp+UAoWonB4oj0xbVR97VUs3hSIuuV8kCCSIZ5esR6JWXlxeW5MAAAIAcf/sBHEEXgARACMAZUA5FxgYEAMhFRAbCAgQDCEEJCUXDA0NDF1ZRQ0BGQ0BCA3oDQIQDw0BFAMNDR4SEgBdWRIQHgZdWR4WAD8rABg/KxESADkYL19eXV5dXV0rERIAORESARc5ETMRMxEzETMRMzEwASIGFRQWMyA1NCYjIzUzIDU0JTIWFRQHFRYWFRQGIyAAERAAApO3sa69ASian1Y5ATj++Mzoz391/uL+/f7jASADx9DQ1tC4XluTqJqXmYi6OQgkiGeWrQEoARMBDwEoAAAAAAH/j/4UAfwESgAVAD9AHw0GCBUTBAgIARMTFhcHFQAVXlkEAAAWAg8LEF1ZCxsAPysAGD8SOS8zKxEAMxESATkRMzMRMxEzETMyMTATMxEzETMVIxEUBiMiJzUWMzI2NREjFJq0mpqdmF5ARUNOSZoCjQG9/kOR/WCkpBmRFFVXAqYAAAIAb/4UBQgGHwAlADIAVUAtGzQlMA0VKQUgEwcHBQ0DMzQTCAoQGB1hWRgBEC1dWRAQCiZdWQoWIwJdWSMbAD8rABg/KwAYPysAGD8rERIAOTkREgEXOREzMxEzMxEzMxEzMTATFjMyNjU1NyMGIyICERASMzIXMyY1NRAzMhcVJiMiFREUBiMiJwEyNjU1NCYjIgYVFBbFoPWMowYIb+XV7/HR33kNDfpGPy87Y+/88JsBiaaXmKmKl5P/AFakkSuHpQEpAQ4BCQEypnVAkwEfG5UUd/ri7O5GAiWzxivcyNvLzNYAAP//AHH+FAQ9BF4CBgBKAAAAAQBx/+wEBAReABkAPUAgCwAQBgAVFRcGAxobGBdeWRgYAwkJDmFZCRADE2FZAxYAPysAGD8rERIAORgvKxESARc5ETMRMxEzMTAlBgYjIgAREAAhMhcHJiMgERQWMzI3ESM1IQQEeLxq7f74ASMBAuN7QpKA/ouem4Np7AGgOSsiASMBEAEUAStKm0j+YMfTHQEtkQAAAAACAAD+GQQQBEoAGAAkAEdAIxgmDRIcHB8fCQAMAxkZDAkDJSYADBIcHBIiFw0PBiJdWQYbAD8rABg/MxI5ORESOTkREgEXOREzETMRMxEzETMyETMxMCUWFhUUBiMiJjU0NjcBMxMWFhczPgITMwE0JicGBhUUFjMyNgJtUTSFZ2qBOUz+YMHQPCoLCAkkK+7A/kIpIyQoLR8fLaKfp0Vvj49vT7CMA6j+Eo12LiFebgIy+tEygzo8fzI7NDIAAAAC//r/7gQUBF4ACQAxAEtAJi0zGgQSChUMAAAVEgMyMwoVIwICIwcdLxgdGF1ZKh0QDwddWQ8WAD8rABg/MysRADMREjk5ERI5ORESARc5ETMRMxEzMhEzMTAlNCcGFRQWMzI2ExYVFAYjIiY1NDY3ASYjIgc1NjMyFhcSFhczNjc3NjYzMhcVJiMiBwJSTEwtHx8tFIyAbGx/OFP+5SYuGCUuOjRHJ8MuDQghRJsnSTI7LSUYLiLdWGdrUjErKQFrv4Vnfn5pSItvAX81CoUYKTf+80khPGTXNioYhQovAAEApv4UBEQESgAWADBAFwEVCgcOCxULFxgPEggWDwsbEgRdWRIWAD8rABg/PzMSORESATk5ETMzMxEzMTABERQWMzI2NREzESMRNDcjBgYjIiY1EQFYd3+nmrW1Cw0ys3HNxARK/UGFg7jXAjj5ygHqVEZRWcPOAssAAAABAK4AAARMBh8AHwA7QB4NCx4eHxUWFgUfAyAhDREWHxUCB2FZAgERGl1ZERAAPysAGD8rABg/MxI5ERIBFzkRMxEzETMzMTATEDMyFxUmIyIGFRUUBzM2NjMyFhURIxE0JiMiBhURI676Q0IvOy8yCgwxtHHIyrJ3f6ebtAUAAR8blRQ2QcBaQFBav9L9NQK+hoO61v3JAAEArv4UBEwGHwAoAERAJBYUBwcIHigoIg4IBCkqFhoIFQsQYVkLARoDXVkaECAlYVkgGwA/KwAYPysAGD8rABg/EjkREgEXOREzETMRMzMxMAE0JiMiBhURIxEQMzIXFSYjIgYVFRQHMzY2MzIWFREQIyInNRYzMjY1A5p3f6ebtPpDQi87LzIKDDG0ccjK+ENCKkAvMgK+hoO61v3JBQABHxuVFDZBwFpAUFq/0vxo/uEblhU2QQACABQAAAH8BeUACwAXAFhAMQIEBwUABAQJBRIFDAMYGQ8VY1lgDwEPDwEMAw8KAwcIB15ZAIkIAXgIAQgIBQoPBRUAPz8SOS9dXTMrEQAzGBDEX15dXSsREgEXOREzMxEzETMRMzEwATMVIxEjESM1MxEzAzQ2MzIWFRQGIyImAWKamrSamrTCPS0qPz8qLT0CjZH+BAH8kQG9ASk8NjY8Ozg4//8AqP/sAqAESgIGAYYAAAABAEoAAAJ
GBEoACwA5QBwIAAAKBQEBCgMDDA0IBQYFblkGDwsCAQJuWQEVAD8rEQAzGD8rEQAzERIBOREzMxEzETMRMzEwISE1NxEnNSEVBxEXAkb+BKSkAfykpGojAy0la2sl/NMjAAEACgAAAvwGFAAbAFRAMRMDAxAEGQQLAxwdAgUABxMQFQ4LFQAAEAAgAAMABxgAAw8OHw4/Ds8OBA4OBBEABBUAPz8SOS9dFzMvXTMzERI5ORESOTkREgEXOREzMxEzMTABIicRIxEmIyIGByM2NjMyFxEzERYzMjY3MwYGAh0dI7QrGzExDmkNc2IaI7QrHTAxEGYMdQKTC/1iAvYSOzx6jQsCh/0hEjs8e4wAAAAAAv/sAAACuAYUABEAGgBMQCgVAwMLDw8IGBAQDRscBhJdWQ8GHwYCCQMGFw4AFwBdWQsXFxAJABAVAD8/EjkvMysRADMYEMRfXl0rERIBOTkRMzMzETMyETMxMBMiJjU0NjMyFxEzETMVIxEjEQMiBhUUMzM1NNl0eXNoQSe01dW0WCggWEgCYG9oY3IlAi384JT9oAJgARkpGUMWbwAAAAEArv4UAokGFAANACFADwcPAQwMDg8NAAkEXVkJGwA/KwAYPxESATkRMxEzMTABERQWMzI3FQYjIiY1EQFgSlRLQERgpJMGFPlZZGEWiSGqrAaqAAEArv4UBPwGFAAdAFNALBgSAgYMABoaGwYSEgMWGwQeHxcDAxZeWQMDCgAcABsVABldWQAPCg9dWQobAD8rABg/KwAYPz8REjkvKxEAMxESARc5ETMRMxEzMxEzETMxMAEhFQEWBBUUBgYjIic1FjMyNjU0JiMjNQEhESMRMwFiA2P+P+sBDYb5oO+Mt8yiwdDOeQHB/XC0tARKg/4MEPjJkOF9SKRWupqdqX0B8fxOBhQA//8Apv/sBs0ESgEPAFAHewRKwAAAB7IAIg8APzUAAAAAAQCm/hQGzQRKACUAQEAgFBEcGSUiBAAAGREDJicFCw4jGhIPABsfFg4WXVkIDhYAPzMrEQAzGD8/MzMSOTkREgEXOREzMzMRMxEzMTABETQ2NyMGBiMgJyMGBiMiJjURMxEQMzI2NREzERQWMzI2NREzEQYZCQMOMqpo/v5OCjW3dLq5st+YkbJudJiNtP4UAeAPiwhTV7hYYL/UAsv9Pf78r7oCXv09goK70gI6+coAAAAAAQCu/hQG1QReACoAS0AnEw8PEAYHISoqJQcQBCssGhMQFxEPBxAVAgsXC11ZHRcQIyhhWSMbAD8rABg/MysRADMYPzM/ERI5ORESARc5ETMRMxEzETMxMAEQIyIGFREjETQmIyIGFREjETMXMzY2MyAXMzY2MzIWFREQIyInNRYzMjUGI9+ZkLNtdJiNtJEbCi+ragECTgo1t3S6ufhDQipBYALDAQSyt/2iAsOCgrrU/ccESpZQWrhYYMDT/Gj+4RuWFXcAAAAB/8X+FARMBF4AHQA8QB4EDQAAChUWChYeHw0WEQsPFhURGl1ZERACB11ZAhsAPysAGD8rABg/PxESORESATk5ETMRMxEzMjEwBRAhIic1FjMyNjURMxczNjYzMhYVESMRNCYjIgYVAWL+8l0yLztIN5EbCjO4b8rEsnd/qZmW/qohiRZZbATdllFZxM/9NQK+hoO70wAAAAEArv4UBTUEXgAgAD5AHwciGRUVFgANFg0hIhkWHRcPFhUdEV1ZHRAKA11ZChsAPysAGD8rABg/PxESORESATk5ETMRMxEzETMxMAUUFjMyNjcVBgYjIiY1ETQmIyIGFREjETMXMzY2MzIWFQRMNUgcPxEVTyuJg3d/qZm0kRsKM7hvysSTalsOCIkOE62pA1SGg7vT/ccESpZRWcTPAAABAK4AAARgBEoADgAsQBQDBgYHAQ0KBwoPEAMKBw4IDwIHFQA/Mz8zEjk5ERIBOTkRMzMRMxEzMTABESMBFhURIxEzASYmNREEYN/9zwiq3QI4AgsESvu2A3Ogb/2cBEr8ixq5JwJ7//8Acf/sBGgEXgIGAnwAAAACAHH/7AZ/BF4AFwAjAHtARhgIEhYWDR4BEAAAFAEIBCQlDQIECxIVXVlFEgEZEgEIEugSAhAPEgEUAxISDgEBFl1ZARUOEV1ZDg8LIV1ZCxAEG11ZBBUAPysAGD8rABg/KwAYPysREgA5GC9fXl1eXV1dKxESADk5ERIBFzkRMxEzMzMRMxEzMTAhITUGIyImAjUQADMyFzUhFSERIRUhESEBFBYzMjY1NCYjIgYGf/0vgcWV5nwBDPLAfwLR/dkCBv36Aif6rKOfnaSln5Gub4OLAQSsAQwBK4Vxlv7Tlf6kAZHP19fPz9HiAAIAc//sBc8EXgAUACUAQkAhBiAPDBUAAAwgAyYnGyMNDR0jIwNdWSMQEgkdCV1ZGB0WAD8zKxEAMxg/KxESADkYLxE5ERIBFzkRMxEzETMxMAE0ACMiABUUFjMyNjURMxEUFjMyNjcUAiMiJyMGIyICNRAAISAABRv++/Hz/vV2aV9mrGVdaHq0z7jeRQpB4LfQAWcBSwFCAWgB1fABBP7776GzjnwBDf7zgIqspOL+/bi4AQPiATUBWP6lAAAA//8Ab/4UBVwGFAIGAd4AAP//AB//7AKgBEoBDwBVA04ESsAAAAeyAAoPAD81AAAAAAEAH//sAqAGFAARAChAEwwJDgIOEhMPDQoADRUABWJZABYAPysAGD8/EjkREgE5OREzMzEwFyInNxYzMjY2NREzESMnIwYGoEk4FkE2V5RVtJQUCD6uFAymD2CqZwQU+ezJa3IAAAAAAQAf/hQDiQRKAB0AN0AcBh8bCgAODgoUAx4fDxIcDxIXYlkSFggDXVkIGwA/KwAYPysAGD8SORESARc5ETMRMxEzMTAFFBYzMjcVBiMgETU0NjcjBgYjIic3FjMyNjY1ETMCoDNBTCkvbv8ACQMIPq5kSTgWQTZXlFW0k2xZFokhAVa9D4sIa3IMpg9gqmcCSgAAAAEArv4UAy8EXgARACpAFA4KCgsLAhMOEgAMDwsbAAViWQAQAD8rABg/PxESOREBOTkRMxEzMTABMhcHJiMiBgYVESMRMxczNjYCrkk4Fj06V5VUtJQUCD+sBF4Mpg5gqWf7ygY2yW1wAAAAAAEArv4UAy8EXgAbADNAGgIQEBsIFhsDHQIcBgAPBgtiWQYQGBNdWRgbAD8rABg/KwAYPxESOREBFzkRMxEzMTATMxczNjYzMhcHJiMiBgYVERQWMzI3FQYjIiY1rpQUCD+sZUk4Fj06V5VUSFQ9QERSpJMESsltcAymDmCpZ/0jZGEWiSGqrAABAKgAAAKgBF4ADgAfQA4OAAAGDxAAFQQKYVkEEAA/KwAYPxESATk5ETMxMDMRNDYzMhcHJiYjIgYVEaiarlJeFxpOOEhHAwivpyGZCBdaY/z6AAABACX+FAIdBF4ADgAfQA4OAAgADxAAGwoEYVkKEAA/KwAYPxESATk5ETMxMAERNCYjIgYHJzYzMhYVEQFqR0g4ThoWXl
Ktm/4UBPJjWhcImSGnr/sMAAAAAgCuAAAEWARKAA0AFQBBQCAMCAsOEwICAwgOAw4WFwsBEwFdWRMTBA0DFQQSXVkEDwA/KwAYPzMSOS8rEQAzERIBOTkRMxEzETMRMxEzMTABIREjESEyFhUUBgcBIwM0JiMhESEgAm3+9bQB7LfOgn4BOdEWdnT+2wECAQ0BtP5MBEqsmHihIP4zAwRVW/6WAAAA//8ArgAABFgESgFHBG8AAARKQADAAAAJswEAAw8APzU1AAAAAAEAaP4UA3kEXgAvAE1AJyMADBcpBgYeEQAXERcwMRoXHiwAKSEmXVkhEAMUXlkDFg4JXVkOGwA/KwAYPysAGD8rERIAORESORESATk5ETMRMzMRMxEzETMxMAEUBiMiJxUUFjMyNxUGIyImNREWMzI2NTQmJy4CNTQ2MzIXByYjIgYVFBYXHgIDeebQXkZIUz1ARFGjlbWoiHx3mJt+O9zAu6M9p4ZwdGS3iYM+AS+aqQyLZGEWiSGprQFrWFhKQVo6PFVqTIecSo9GRz48T0YzWG4AAf/F/hQCTAYfABYAKEAUEBgEAAoKFxgNE11ZDQECB11ZAhsAPysAGD8rERIBOREzMhEzMTAFECEiJzUWMzI2NREQITIXFSYmIyIGFQFi/vJdMi87SDcBDlw0ET4cSDeW/qohiRZZbAVcAVYhiQgOWWsAAAAB/8X+FAJMBh8AHgBaQC8YIAgCBBAOAAQEEg4OHyADEBEQXlkAGREBAw8RARAGEREGFRUbXVkVAQYLXVkGGwA/KwAYPysREgA5GC9fXl1fXTMrEQAzERIBOREzMxEzETMRMzIRMzEwATMVIxEQISInNRYzMjY1ESM1MxEQITIXFSYmIyIGFQFimpr+8l0yLztIN5qaAQ5cNBE+HEg3Ao2R/W7+qiGJFllsAo+RAjwBViGJCA5ZawAAAf/j/hQCTAReABcAKEAUExkFDBcXGBkIA11ZCBAVD11ZFRsAPysAGD8rERIBOREzMhEzMTATNCYjIgc1NjMyFhURFBYzMjY3FQYjIBGuODcuLi9WfX02SRw+ETRc/vIDFGRTF4kho578UGxZDgiJIQFWAAAC/x/+FAJMBh8AGQAhAFBAKwYjIBYWDRAAHQ8QHRAiIwMJXVkDARAeGR5dWQ0PGR8ZAgkDGRUTGl1ZExsAPysAGD9fXl0zKxEAMxg/KxESATk5ETMRMxEzMhEzETMxMDMRECEyFxUmJiMiBhURMxUjBgYjIiY1NDYzAzI2NyMiFRSuAQ5cNBE+HEg3mpoDjI+EoZaTE0I1AlqLBMkBViGJCA5Za/s5k6uuh2t7f/6oVXBjYgABAC0AAALDBVoAFQBmQD8LCQIQCRIOEA4WFwwQCREREGRZDxEvET8RTxHvEf8RBhMDEREGDgYAXVlPBgFfBgEABjAGkAYDEAagBgIGDhUAP8RdcV1xKxESADkYL19eXSsRADMRMxESATk5ETMzETMRMzEwEyIHNTY2MyARETMVBwcjNSE1IRE0JsdPSx9pMAFCnJ5Iav7CAT5aBMcXiQ4T/qz9gVZI6fyLAntcaQAAAAEAIf4UArYFRgAVAEBAHwoIDxMTCBEDCAMWFw0PQAwPChIPEmRZDw8GAF1ZBhsAPysAGD8rEQAzETMaGBDNERIBOTkRMxEzETMRMzEwATI3FQYGIyARESM1NzczFSEVIREUFgIdYDkdZjX+vpudSGsBPf7DW/6oFokNFAFUBFZWSOr8jPuvX2YAAAAAAgAU/+wE3QRKABcAHwBoQDYHCRQSAR0dFhIFCQkCGwsSCyAhDA8XCBwUFRReWQUBGRUBAw8VARAGFRUKAxcPChUPGF1ZDxYAPysAGD8/MxI5L19eXV9dMzMrEQAzMxESORESATk5ETMzMxEzETMzETMRMxEzMTABESERMxEzFSMRIycjBgYjIiY1NSM1MxEBMjY3IRUUFgFYAje1mZmUGgkxtHfGyY6OAaqknQL9yXcESv5OAbL+TpL9+pFPVr7Ri5IBsvw3ts9/hYEAAQA9/+wEmgRKAB8ARkAkABkMEx8CDRMJAhkZHBAJBCAhEBwOAA0ODV1ZHQ4PBRZdWQUWAD8rABg/MysRADMSOTkREgEXOREzETMzETMRMxEzMTABFhEUACMiJiY1NDY3ITUhFQYGFRQWMzI2NTQCJzUhFQOP2f7u7pLmf21q/vUB5G+HpZ2ap5BoAeQDtKz+6vH+63/rmpjcUJaOMvyesL+/spUBCC2OlgAAAAEApv/sBEgEXgAbAC9AGBUSDBsbBRIDHB0TDwgDYVkIEA8YXVkPFgA/KwAYPysAGD8REgEXOREzETMxMAE0JiMiBzU2MzIWFREUBiMiJhERMxEUFjMyNjUDkz1QOjtGO6eP5ezy37SDmpqCAwZsUROaFKGx/s/49+8BAAJv/Ze+oqa6AAEAAAAABBAESgAMABpACwEOCw0GDAILFQwPAD8/MxI5EQEzETMxMAEBIwMmJyMOAgMjAQJxAZ/A6kYSCAUeKfnBAaIESvu2AofHXBtge/1MBEoAAAEAFwAABjMESgAcACJAEBsJHR4WDwQDGwccDxMKGxUAPzMzPzMSFzkREgE5OTEwARMWFzM2NxMzASMDJiYnIwYHAyMCAicjBgYDIwECG7wfLQgoIsTNAS26mC0VBQkdLMPEfX8LCAUmuLgBMQRK/ZVlvrxpAmn7tgJKtmkokpX9lgGZAZ9XJqr9QQRKAAAAAAEAAAAABBIGHwAWACtAFAkQABYAFxgWBQUNAQkVDRJdWQ0BAD8rABg/MxI5ETMREgE5OREzMjEwISMDJicjBgcDIwE2NjMyFxUmIyIGBwcEEsDuSBQIGkTfwwHZQ76OVz81RF50IjkCZr1oar39nATPsp4RjwxmWJAAAAABAAAAAAPLBEoACAAiQA8HCgMAAQEJCgUBBgMPARUAPz8zEjkREgE5ETMyETMxMCEjEQEzAQEzAQJCtf5zzQEaARnL/ncBxQKF/hAB8P17AAEAUP4UBFwESgAXAEdAJhEZBwMKFxcDCAQBBRgZBwQFBQRkWQUPAggBAQhkWQEVFA1dWRQbAD8rABg/KxESADkYPysREgA5ERIBFzkRMxEzETMxMCEhNQEhNSEVASERFBYzMjY3FQYGIyImNQLJ/YcCTv3VAvH9uwJUNUgXQxIWTiuHfXcDR4yH/Mj+4mpbDQmJDhOrqwAAAgBQ/04EQgRKABUAHgBdQDQZCwcTHBwHBAMMCAUHHyACBRAWZFkPEB8QrxADCQMQBQsICQkIZFkJDwYFGQwFDGRZAAUVAD8zKxEAMxI5GD8rERIAORgQxF9eXSsAGBDEERIBFzkRMxEzMzEwIQYHJzchNQEhNSEVATM2NjMyFhUUIRMiBgczMjU0JgIpHiyJMf7JAk791QLx/buqWbh2aYn+rk83ZTaNniw8djl5dwNHjIf8yLCWfVj8AUZaYW0dMQAA//8AHf4UA7YESgIGAucAAAAC/9f+FAQfBEoAIQArAJNAVA0tKRYWASQFCxAkCxsJHR0bJ
AYhAgYsLQ8mARIGGSZdWQAZEBkgGQMQAxkZEgAGBiFeWQ8GHwYCCQMGBhIDDhsFAgMDAl1ZAw8AIgESBhIiXVkSGwA/KwBfXl0YPysREgA5GD8REjkvX15dKxEAMxI5GC9fXl0rAF9eXRESARc5ETMRMxEzETMRMzIRMxEzMTATASE1IRUBFgQVFAcWFwcmJwYhIiYmNTQ2MzIXNjU0JiMjEzI3JiMiBhUUFuwBwP2NA0b+P+sBDTNmNnVPPqn+7m+1Z7mb7N0O0M54dsh0tbpTW3wBwQHxmIP+DBD4yXVkZEJvWjqURoNacoiVNiydqf1maog5MD5LAAEAGQAAAzMGHwATAC1AFhMADwQEAAoDFBUSAQEMABUMBl1ZDAEAPysAGD8SOS8zERIBFzkRMxEzMTAzETY2NTQjIgYHJzYzMhYVFAIHEf6vzvxHnT9DnNLN38e6Auc24JT4Ni6Hc8y7p/7vV/13AP//ADUAAANPBh8ARwSDA2gAAMAAQAAAAAABABn/7AMzBhQAEwAtQBYBEgQPDxIJAxQVARISBxMABw1dWQcWAD8rABg/EjkvMxESARc5ETMRMzEwAREWEhUUBiMiJzcWFjMyNTQmJxEBsrnI4MzQnkM+n0b8ybQGFP1tVv7upr3KcocuNviS3zkC8QAAAAEAcf4XA5MEXgAWAChAFA8DCRUDFRcYBgxhWQYQABJhWQAbAD8rABg/KxESATk5ETMRMzEwASICERAAITIWFwcmIyICERASMzI3FQYCZv73AQABAlCdMzeLYqaek6aRjHL+FwGCAZgBkgGbIRqWNP69/rf+tf7IQKA7AP//AH3/7AXDBc0CJgAyAAABBwB5Ag4AAAAbQBICPyFvIa8h3yEEIQIAGB4MEiUBKzUAEV01AAAA//8ArgAABFgESgIGAcwAAAACAFz/7ARcBF4AEAAiAGVAOR4dHQIJGgIgFA4OBiAaBCMkHgcEBAddWUUEARkEAQgE6AQCEA8EARQDBAQXEREAXVkREBcLXVkXFgA/KwAYPysREgA5GC9fXl1eXV1dKxESADkREgEXOREzETMRMxEzETMxMAEgFRQhMxUjIBUUITI2NTQmJyAAERAAISImNTQ2NzUmNTQ2Ajn+9AE3Olb+xgEpva6xvAEIAR/+4f7+4f51f8/oA8eaqJO5uNDW0NCX/tf+8v7t/titlmeHJQg5uoiZAAEAcf/sBL4GHwAlAE1AKRInHAYXAAALISEjBgMmJyQjXlkkJAMJDxRhWQ8BCRphWQkQAx9hWQMWAD8rABg/KwAYPysREgA5GC8rERIBFzkRMzMRMxEzETMxMCUGBiMiABEQACEyFzU0NjMyFxUmIyIVEQcmIyARFBYzMjcRIzUhBAR4vGrt/vgBIwECbFZufjs/LztgQpKA/ouem4Np7AGgOSsiASMBEAEUASsSwISPG5UUYv7zm0j+YMfTHQEtkQAAAP//AK4AAARqBEoCBgHXAAAAA/89/hQB/AXlAA8AFwAjAF1ANRYLCwIFDxMEBR4FExgEJCUbIWNZYBsBDxsBDAMbAA8FFA4UXVkCDw4fDgIJAw4VCBBdWQgbAD8rABg/X15dMysRADMYP8RfXl1dKxESARc5ETMRMxEzMhEzMTATMxEzFSMGBiMiJjU0NjMzAzI2NyMiFRQTNDYzMhYVFAYjIiautJqaA4aHepuPilhoOS0CTnm5PS0qPz8qLT0ESvu2k66rhW16gP6oVm9jYgbLPDY2PDs4OAABABL+FAOYBEoADwA2QBwIBQwJDw0NBQQGBBARDwgFAwkJCgQKBg8EFQ0bAD8/PzMREjkRFzMREgEXOREzMzMRMzEwAQYHASMBATMBNxEzESMRNwLlTzT+otMBvf4k2gF8fbOzCQIUaTj+jQHVAnX+AGwBlPnKA06yAAAAAQCuAAADVgRKAAUAH0AOAgUFBAYHAA8FAl1ZBRUAPysAGD8REgE5OREzMTATMxEhFSGutAH0/VgESvxMlgAAAAIAcf4UBScGHwAfACwASkAnBS4qFgsQHCMfBAwWDC0uHBETGQwbAgddWQIBGSddWRkQEyBdWRMWAD8rABg/KwAYPysAGD8REjk5ERIBOTkRFzMzETMRMzEwARAhMhcVJiMiBhURIxE0NjcjBiMiAhEQEjMyFzMmJjUBMjY1NTQmIyIGFRQWA4kBAm0vLElEMbQKAw123tft7dfddw0DCv7HpJeZpIuYlwTJAVYhiRZbaflNAdgYdxKhASoBDQENAS6iFHkV/COzzCHlw93NzNIAAAAAAQAZAAADMwYfABsAUkArGQgXGxsEABMICAACDgQcHRoCAwJdWRcAAwELAwMDABYFBRAAFRAKXVkQAQA/KwAYPxI5LzMSOS9fXl0zKxEAMxESARc5ETMRMzMRMxEzMTAhESM1MxE2NjU0IyIGByc2MzIWFRQCBxUzFSMRARK6uqbD/EedP0Oc0s3fv63d3QFQlQECOOKQ+DYuh3PMu6L+6leklf6wAAAAAQA1AAADUAYfABsAUkArGxAQBRQYGAEZChYZBQQcHRcbABtdWRQAAAELAwAAGQITEwgZFQgOXVkIAQA/KwAYPxI5LzMSOS9fXl0zKxEAMxESARc5ETMzETMRMxEzMTATMzUmAjU0NjMyFwcmJiMiFRQWFxEzFSMRIxEjxd2vvuDM05xEPp9G+8Wjurq03QHlpFkBFKK7zHOHLjb4keI3/v6V/rABUAAAAwBx/+wHXgYUABgAJQAoAGdAOCMDDBwPJycJFhEUFCgSFhwDBikqJxIVFRJkWRUVESYPDyZkWQ8PCRcABg0ABiBdWQYQABldWQAVAD8rABg/KwAYPxESOTk/KxESADkYPysREgA5ERIBFzkRMxEzMxEzETMRMzEwBSICERASMzIXMyYmNREzESEVASEVIScjBicyNjU1NCYjIgYVFBYBEQECM9bs7dfddw0DCrQDE/26AlT8ThsIc8akl5mki5iXAnsCTBQBKAEPAQ0BLqIUeRUBtv42h/zIi5OnlbPMIeXD3c3M0gM9/LsDRQACAHH+FAeTBhQALQA6AIJARhUPLQMTCRc4HygxKxcXJRkDDw8AGTEfBTs8JRocIhQAABNeWQAABy0WKysWXVkrDykAGBUiNV1ZIhAcLl1ZHBYHDF1ZBxsAPysAGD8rABg/KwAYPz8/KxESADkSORgvKxEAMxESOTkREgEXOREzETMzETMRMxEzETMzETMRMzEwARYEFRQGBiMiJzUWMzI2NTQmIyM1ASERIycjBiMiAhEQEjMyFzMmJjURMxEhFQEyNjU1NCYjIgYVFBYFnOsBDIb5n++Mt8yiwNDNeQHA/bSRGwhz49bs7dfddw0DCrQDH/r0pJeZpIuYlwHTEPjJkOJ8SKRWuZudqX0B8fxOk6cBKAEPAQ0BLqIUeRUBtv42g/y6s8wh5cPdzczSAAAABABx/04ILQYUACUAMgA1AD4AkUBSOTUwFyApIzQ0HRElNQc8PDUPDQARKRcIP0AMEAQ2ZFkPBB8ErwQDCQMEEDQ5CgMA
EBAAZFkQFSUzIyMzZFkjDx0SFBohABotXVkaEBQmXVkUFQA/KwAYPysAGD8REjk5PysREgA5GD8rERIAFzkYEMRfXl0rABgQxhESARc5ETMRMxEzMxEzETMRMxEzMTAlMzY2MzIWFRQhIwYHJzY3IScjBiMiAhEQEjMyFzMmJjURMxEhFQEyNjU1NCYjIgYVFBYBEQETIgYHMzI1NCYFCqpXuXdpif6uxyghiR4T/jkbCHPj1uzt1913DQMKtAMT+wCkl5mki5iXAnsCTKI4ZTaNniyLrJp9WPxUXjlQKZOnASgBDwENAS6iFHkVAbb+Nof8vrPMIeXD3c3M0gM9/LsDRf2IWmFtHTEAAgAhAAAFtgVGAB0ALwBhQDISAAgGDSwsBhgqACEhKigGBDAxCw1AJCEoGwAYEBVdWRAQCg0HKw0rZFkNDwQvXVkEFQA/KwAYPysRADMRMxg/KxESADkREjkaGBDNERIBFzkRMxEzETMRMxEzETMxMAEUBiMhIBERIzU3NzMVITYzMhcHJiMiBhUUFhcWFgEyNjU0JicuAjU0NyERFBYzBbbg1v3+/r6bnUhrAiVUWLmlPqeGb3Rkt76L/k6Je3Sam347If6qYEwBL5aZAVQCalZI6vwUSo9GRz48T0ZHkP71QUtAWjs8VWpMRzz9mmRhAAAAAAIAIf4UBE4GHwAkACwAXEAwHi4EKhIQFyoqEAAYJwoQCi0uGyFdWRsBFRUUFxEpFylkWRcPDiVdWQ4WAgddWQIbAD8rABg/KwAYPysRADMRMzMYLz8rERIBOTkRMzMzETMRMxEzETMRMzEwBRAhIic1FjMyNjU1BgYjIBERIzU3NzMVITUQITIXFSYmIyIGFQEyNxEhERQWA2T+8l0yLztINxlmM/6+m51IawE/AQ5cNBE+HEg3/rlVPv7BW5b+qiGJFllsmwoSAVMCf1ZI6vx/AVYhiQgOWWv7uBQDK/2GX2YAAAIAIf/sBo8FRgAvADoAkkBPKwgbJRQSGR0dEgAlAiMzDQg4OA0jJRIFOzwzDTAQIwIgBQUwXVmQBQEPBR8FAgkDBQULKCguYVkoEBcXFhkUHBkcZFkZEDUgECBdWQsQFgA/MysRADMYPysRADMRMzMYLz8rERIAORgvX15dXSsREgA5ORESOTkREgEXOREzETMRMxEzETMRMxEzETMRMzEwARQXNjYzMhYVFAYjICcGBiMgEREjNTc3MxUhFSERFBYzMjY3JjUQADMyFhcHJiMgASIGBxYzMjY1NCYD3w9frF2Oq8eh/vWCecBj/r6bnUhrAT3+w1JDS5RtLQEL91SbMjiLYv68AWs+gmVSrldpUQIhT0tGO4dzhZ2jWkkBUwJ/Vkjq/Iz9hlhtO1JwowEUASsiGZY0/bIwR3tNPDA5AAAAAAEAH/4UBj8GHwAxAGdANx4PJRMTGBQnDw8QLwcHAhAUFgUyMycQKxAUFRwhXVkcASsLXVkrEBglFhIlEmRZJQ8ABV1ZABsAPysAGD8rEQAzETMYPysAGD8rABg/MxESORESARc5ETMRMxEzETMzETMRMzEwASInNRYzMjURNCYjIgYVESMRIREjESM1NzU0NjMyFwcmIyIGFRUhFzM2NjMyFhURFAYFGVc7Pj2Jdn2nm7T+8bTAwK+2aWwwXUZbWAGgHQo2tGrJyI/+FBmRFKwDa4WBuNT9xQO+/EIDvlQ+P8jIJY0eeIJHllZUv9L8j5quAAABAK7/7ATwBhQAJgBKQCYaAAoGBgcgFQAODhUHAycoEQ4VIwAgCAAHFRgdXVkYEAMMXlkDFgA/KwAYPysAGD8/ERI5ERI5ERIBFzkRMxEzETMRMxEzMTABFAQjIicVIxEzERYzIDU0JicuAjU0NjMyFwcmIyIGFRQWFx4CBPD+/fXqrLS0ztIBQHeYm3473MC7oz2phHB0ZLeJgz4BL56lVkIGFPrVbKJBWjo8VWpMh5xKj0ZHPjxPRjNYbgAAAAACAK4AAASDBhQACAALAEBAIQQKCgEGAAALBwEEDA0CAAYJBAQJZFkEDwoHAQEHZFkBFQA/KxESADkYPysREgA5GD8REgEXOREzETMRMzEwISERMxEhFQEhAREBBIP8K7QDE/26AlT83wJMBhT+Nof8yAMz/LsDRQACAAAAAAQSBbYADAAZAHJASxgLCxsRBAQTBg4BFgkJAQYDGhsUFw4TFg0GEBEABgkBCgcGBCAEUARwBIAEoASwBNAEBy8QXxB/EI8QrxC/EN8QBxAEEAQDEQMDFQA/PxI5OS8vXV0RFzkREhc5ERIBFzkRMxEzETMyETMRMxEzMTAhAwMjAzMTEzMTEzMLAyMDMxMTMxMTMwMCrqaqlcmPiKyPpI+NypqmqpXJj4isj6SPjcoCAv3+ArD9+gIG/fgCCP1QAwYCAv3+ArD9+gIG/fgCCP1QAAACAK4AAAQKBbYABwAPAERAIQ4GBg8HCgICCwMHAxARCw8IQAgNYlkIAwAFYlkAAAMHFQA/MzMvKwAYPysAGhgQzTIREgE5OREzMxEzETMzETMxMBMhESMRIREjESERIxEhESOuA1y0/gy0A1y0/gy0Ad3+IwE1/ssFtv4jATX+ywAAAAAB/9f+FARWBF4AIAA6QB0FDCAVEhkWIBYhIhodEw8WGwgDXVkIEB0PXVkdFgA/KwAYPysAGD8/EjkREgE5OREzMzMRMzIxMBM0JiMiBzU2MzIWFREUFjMyNjURMxEjETQ3IwYGIyImNbg8Oj0uLF+AiHd/ppy0tAoMMbRx0MIDSEg7E44Yj4X+QYWDt9gCOPnKAepaQFBaxssAAAAB/9f+FAUhBF4AKQBHQCUGKxohFCcKAA0NChQDKisOESgPHRhdWR0QESRdWREWCANdWQgbAD8rABg/KwAYPysAGD8SORESARc5ETMRMxEzMhEzMTAFFBYzMjcVBiMiETU0NyMGBiMiJjURNCYjIgc1NjMyFhURFBYzMjY1ETMEVis5QSYtY+8KDDG0cdDCPDo9LixfgIh3f6actJNuVxaJIQFWlFpAUFrGywHJSDsTjhiPhf5BhYO32AI4AAEAngGHA38GFAAUAC9AFg0LBwcIFAAIABUWDQgQCQAACFQDEFcAPzM/Mz8REjkREgE5OREzETMRMzMxMAERNCMiBhURIxEzERQHNjYzMhYVEQLPonRpsrIIJoNMoKIBhwIRsoSR/lIEjf6kPDA8S5Kh/ecAAQCeAYcDfwYdABwAM0AYBQwKGxscExQcFB0eDBwPBwIBFBxUFw9XAD8zPzM/MxESORESATk5ETMRMxEzMzMxMBM0MzIXFSYjIhUVFAc2NjMyFhURIxE0IyIGFREjntEySDsrMwgohUigorCidGmyBT3gGZEaR448KDlGkqH95wIRsoSR/lIAAAL/xwAhAVwF9AAMABgANUAeAhMLCw0ICBkaBQAAEABAAFAABAAWbxB/EAIQgAlWAD8azF0yL10zERIBOREzMxEzMjEwNyInNRYzMjY1ETMRFAM0NjMyFhUUBiMiJlRJREkyMiqyvTw
rLDY1LSs8IRaMFzc4A6z8WP4FbjUwNi8sNjEAAAEAngGHAqwE1QAPACVAEQ0JCQoKAhARDQoLVgpUBQBXAD8yPz8RORESATk5ETMRMzEwATIXByYjIgYVESMRMxc2NgIzOz4UPi9dfrKZCzBvBNUMmhCRbP5FA0CuZ1UAAAEAMQF5Aj8ExwAPACFADwsIDQINEBENCVYMVAUAVQA/Mj8/ORESATk5ETMzMTATIic3FjMyNjURMxEjJwYGqjs+FUIqXX6ymQowdAF5DJoRkmwBu/zArmVXAAAAAQAxACEC7gTHABsAOEAfBx0ZCwAODgsTAxwdDhEaAwAJEAlACVAJBAkaVhYRVQA/Mz8vXTMREjkREgEXOREzETMRMzEwARQWMzI2NxUGIyIRNTQ3BgYjIic3FjMyNjURMwI/GycZPxUvXdUPOmtNOz4VQipdfrIBK0I/EwyHIQEIiUFCaFQMmhGSbAG7AAAAAgCeAYcDmATHAA0AFQBKQCkBBQIODBISCQUOCQ4WFwIRmQwBiAwBDwwfDC8MAwwMCQAKVrcSARIJVAA/M10/MxI5L11dXTMzERIBOTkRMxEzETMRMxEzMTABMwMWFhUUBiMhETMRMxM0IyMVMzI2AsnP8Fpcp5L+ebKmurSsx1JHBMf+qht7WHaGA0D+wv76e+xAAAEAJwGHBPIExwAYACJAEAgXGRoMEwMDBxYPCFYAB1QAPzM/MzMSFzkREgE5OTEwAQMmJwYHAyMDMxMWFzY3EzMTFhc3NxMzAwNEkB0MDxiVveu6dRUQBx6TtpAeBxIVdrfuAYcBz15GZkD+MwNA/klJXzdZAc/+MWcnUlQBt/zAAAAAAAEAFAAhA1wExwATAC9AGwANBxMNAxQVBBM6EwETAAsQC0ALUAsECwYAVgA/Mi9dOV0RMxESARc5ETMxMBMzExYXNxMzAQYGIyInNRYzMjc3FMG0JBE2qr7+ljacblE0SShxMyYEx/4tX0WmAdH8Wol3EIcOhWQA//8AGQPBAU4FtgIGAgcAAP//ABkDwQLHBbYCBgILAAD//wAZA8EBTgW2AgYCBgAA//8AGQPBAVAFtgIGAgkAAAAB/5MEmgBzBkoADQAtQCANDwAfAC8AAwAAByAGAQ8GLwZPBl8GfwafBs8G7wYIBgAvXXEzMy9dMjEwAzIWFRQGIzUyNjU0JiNtaHh6ZjJBOzgGSnVkZXJmOzY2OgAAAAAB/5MEmgBzBkoADQAtQCAHDwYfBi8GAwYGDSAAAQ8ALwBPAF8AfwCfAM8A7wAIAAAvXXEyMi9dMzEwEyImNTQ2MxUiBhUUFjNzZnp4aDJBOzgEmnRlYXZnOjY2OwAAAAABABADtgInBiEAEgAqQBUSAA8DAwAKAxMUEQFADxJIAQEGDAAAL8QyOS8rMxESARc5ETMRMzEwEzU2NTQmIyIGByc2MzIWFRQHFaT4UkgvbCotboiFnP4Dtqw4kztKIh1qRINvwU1r//8AIQO2AjgGIQBHBK4CSAAAwABAAAAA//8AZgA3BCsEJgAHAB8AAP9JAAD//wBmADcEKwQmAAcAIQAA/0kAAAABAFAASgQ/BA4ABgAYQAkAAwcIBQEEAAEALy8yEjkREgE5OTEwNwEzAQcBAVABqGYB4Z/+k/69SgPE/D4CAwT8/AAA//8AUABKBD8EDgEPBLIEjwRYwAAAFUAPADACbwKPAq8CzwLvAgYCABFdNQD///+uBMUAUgYUAgYE7gAA////rwTZAUsGIQAHAHb+LAAAAAD///63BNkAUwYhAAcAQ/00AAAAAP///67+YABS/68BBwTuAAD5mwActADgAwEDuP/AsxITSAO4/8CzDRBIAwARKytdNf///tL+1AEy/2UBBwFN/af5+wARQAsAAAAgAHAA4AAEAAARXTUAAAD///8z/l4Az/+mAgYE9gAA////M/5eAM//pgIGBPcAAAAC/0oAAAC2BEoAAgAFACFAEw8FHwUCAAEQAQIFAQUBAgQVAg8APz8SOTkvL11dMTATAwMBIRO2trYBbP6UtgRK/uMBHfu2AR0AAAAAAf9KAy0AtgRKAAIAFEAKAAEQAQIBAQMCDwA/EjkvXTEwEwMDtra2BEr+4wEdAAD///+TAfcAcwOnAAcErAAA/V0AAP///5MB9wBzA6cABwStAAD9XQAA////Kf5WANf/ngIGBP0AAP///yn+VgDX/54CBgT+AAD///8p/i0A1//HAgYE/wAAAAH/Kf68ANf/NwADAAixAwAALzIxMAchFSHXAa7+Usl7AAAAAAH+yQFcATcDFAANABVACgQJCwkPAF8AAgAAL10yMi8zMTADFxYWMzI3FwYjIgMHJzkiFz4vUVQlcH23S1olAxR6VUs3h04BAhiLAP///y8EkQDRBjMCBgUcAAAAAgAUACMDWgTHABQAIAA+QCUbCQAMAxUUFRgMCQ0GISIQGA0eQAZQBgIABhAGQAZQBgQGEw1WAD8zL11xMxI5ORESARc5ETMRMxEzMTABFhYVFAYjIiY1NDY3ATMTFzY2EzMBNCYnBgYVFBYzMjYCGy40b1hYbi40/sK9xiEKF8W8/o8ZGhoZIBMWHQIMWIk/VnNwWT2EXwK7/jdSHT0BwfwpGFQtLVQYHCYnAAEAngGHAVAGFAADABZACQABAQQFAgABVAA/PxESATkRMzEwASMRMwFQsrIBhwSNAAEAagF5AuEE1QAhACZAERYABRwRAAoRCiIjGRRXCANVAD8zPzMREgE5OREzETMzETMxMAEUBiMiJzUWMzI1NCYnLgI1NDYzMhcHJiMiBhUUFhcWFgLhtamobqJ4rE1vf18vspiWizuJZUpLQoSUcAJzd4M5nE5oKjspMEBRO2p5PYY6LCYlNDI4cwAAAQArAYcDZATHAAsANEAZAwkJBgELBgAFBwcACwMMDQMJCwQBVggLVAA/Mz8zEjk5ERIBFzkRMxEzETMRMxEzMTABATMTEzMBASMDAyMBYP7ZycfEx/7ZATfI09XJAy8BmP7lARv+aP5YASv+1QAAAAEARAGHAscGHQATACJADwkTDwQTAAQAFBUNBwEAVAA/PzMREgE5OREzETMRMzEwAREmJjU0NjMyFwcmJiMiFRQWFxEBZIuVrqqjiEI4ejGolIkBhwHhRct4jaBhfyctpWedKv3JAAAAAQCgAAACuAWBAAUAGEAJAAEDAQYHAQMEAC8zLxESATk5ETMxMCEjESE1IQK4h/5vAhgE+ocAAAABAKAAAAK4BYEABwAgQA0ABQEDAQgJAwQEBgEGAC8vEjkvMxESATk5ETMzMTAhIxEhNSERMwK4h/5vAZGHA6iHAVIAAAEAoAAAArgFgQAHACRAEAAFAQMBCAkDDwQBBAQGAQYALy8SOS9dMxESATk5ETMzMTAhIxEhNSERMwK4h/5vAZGHApyHAl4AAAEAoAAAArgFgQAHACBADQAFAQMBCAkDBAQGAQYALy8SOS8zERIBOTkRMzMxMCEjESE1IREzAriH/m8BkYcBVocDpAAAAQCgAAACuAWBAAUAGE
AJAAMBAwYHAgEEAC8vMxESATk5ETMxMCEhNSERMwK4/egBkYeHBPoAAAABAKAAAAK4BYEABQAYQAkCBQUEBgcCBQAALy8zERIBOTkRMzEwEzMRIRUhoIcBkf3oBYH7BocAAAEAoAAAArgFgQAHACRAEAIGBgcHBAgJBQ8CAQICBwAALy85L10zERIBOTkRMxEzMTATMxEhFSERI6CHAZH+b4cFgf2ih/1k///+p/5pAVv/sQEHAUz9o/mQAB20ANAKAQq4/8CzEBJICrj/wLQKD0gKIwA/KytdNQAAAP///lkEygGrBlwBBwIF/l0GkQAbQBIBAAFAFBdIAUAOEEgBQAkLSAEAESsrKzU1AP//ABkDwQLHBbYCBgILAAAAAf6q/hQBVv/bAAYAGLUFAwIcBgO4/8CzDxtIAwAvKzM/EjkxMAUBIwEzExMBVv7dZP7borS2Jf45Acf+7gESAAH+qv4UAVb/1wAGABi1BQEEABwBuP/Asw8YSAEALys/MhI5MTABATMBIwMD/qoBI2QBJaK0tv4UAcP+PQEP/vEAAAAAAf8A/hQBAgAvAAYAGEAPABwPAx8D3wMDA0APEkgDAC8rXT8xMAElNSUVBQUBAv3+AgL+sgFO/hTcZNuNf4EAAAH/AP4UAQIALwAGABhADwMcDwAfAN8AAwBADxJIAAAvK10/MTAlBRUFNSUl/wACAv3+AU7+si/bZNyOf4EAAAAB/zsCRADFA4sACQAMtA8FAQUBAC/NXTEwEyMmJic1MxYWF8V3Q7Ie3BloLQJENMg3FDa5PgAAAAL+kwI/AW0DhwAIABIAErcMDwIBAgISCAAvMzMvXTMxMAE2NzMVBgYHIyU2NjczFQYGByP+k1Nc2yGuQnkBUCZoINwerUZ5AlpqwxQ7xjMbMLZHFDbGOAAC/pMCPwFtA4cACQATABK3BQ8PAQ8PAQsALzMzL10zMTABIyYmJzUzFhYXBSMmJic1MxYWFwFteUGwINsmYyb+sHk/tB7cI2ohAj8zyDkUUK8uGzDONhRNuCgAAAH+mv59AWj/hQAXABtADRQMCREAgAUPDB8MAgwAL10zGt0yxBDGMTATIi4CIyIGByM2NjMyHgIzMjY3MwYGlihPTEcfLTIOZghxWilRTUUdLC0RaA1u/n8jKyM1PnyKIykjMz58igD///+CAZEAgAYUAAcAHf7vAa4AAAAB/1YEHwCqBXMABQATQAkABWAFAgUFAwAALzIyL10xMAMhFSMVI6oBVOdtBXNt5wAB/1YEHwCqBXMABQATQAkAAmACAgICBAUALzMzL10xMBMRIzUjNapt5wVz/qznbQAB/1YB5wCqAzsABQAMswEBAwAALzIyLzEwAxEzFTMVqm3nAecBVOdtAAAAAAH/VgHnAKoDOwAFAAyzBAQCAQAvMzMvMTATITUzNTOq/qznbQHnbecAAAAAAf43/lYByf+oAAcAF0ALBAABAQEBBiACAQIAL10zMy9dMzEwASERMxUhNTMByfxucwKqdf5WAVLLywAAAf43/lYByf+oAAUADLMEAYACAC8azTIxMAEhETMVIQHJ/G5zAx/+VgFSywAB/i/+FAHRAJoACQAOtAAIAwgFAC8zMxEzMTATATUBFQUhFSEFMf3+AgL+8gKu/VIBDv4UARFkARF5i3uOAAD///6nBNkBWwYhAAcBS/2jAAAAAP///tIE2QEyBWoABwFN/acAAAAAAAH+VgYrAawGvAADABlADwPvAAEAQBATSABACQxIAAAvKytdMjEwASEVIf5WA1b8qga8kf///sIE2QE/BewABwFO/aMAAAAA////lwUAAGoF5QAHAU/+9wAAAAD///7gBQwBIAXXAAcAav2vAAAAAP///QQEuP53BpECBgJjAAD///8gBNkA4gaJAAcBUP2zAAAAAP///pAE2QFvBiEABwFT/bEAAAAA///+pwTZAVsGIQAHAUz9owAAAAAAAf+uBMUAUgYUAAMAD7ZgAgECgAMAAD8azV0xMBMRIxFSpAYU/rEBTwAAAAAC/woExQD2BhQAAwAHABRACQZgAgECgAcDAAA/MxrNXTIxMAMRIxEhESMRUqQB7KQGFP6xAU/+sQFPAAAC/sEE2QE9BsEADQAZACVAFhEXCg8DHwMvAwMDAwegAAEPAF8AAgAAL11dMjIvXTPEMjEwAyImJzMWFjMyNjczBgYDNDYzMhYVFAYjIiYGjaMJbgdUdGRhCnAKrO89LTA4Oi4tPQTZiolGPD5EgZIBdTw3PjU2PTgAAAAAAf7BBNkBPQXsAA0AI0AWBw8AHwAvAJ8ABAAABKALAQ8LXwsCCwAvXV0zMy9dMjEwEzIWFyMmJiMiBgcjNjYEjaMJbglVcWdgCHALrAXsiolJOEBBgZIAAAH/ZAPBAJoFtgAGAAmyBgMDAD/NMTADJzYTMwYHjw00fIZCJQPBFscBGP73AAAAAAH/ZgPBAJwFtgAGAAmyBAYDAD/GMTATFwYDIzY3jw00fIZCJQW2Fsf+6P73AAAAAAH/ZAPBAJoFtgAGAAmyAwYDAD/NMTATFhcjAic3MyVChnw0DQW29/4BGMcWAAAAAAH/ZgPBAJwFtgAGAAmyBAYDAD/GMTATFwYDIzY3jw00fIZCJQW2Fsf+6P73AAAAAAH/M/5eAM//pgAJABVAC2ABAQGAEAUgBQIFAC9dGs1dMTATIyYmJzUzFhYXz3lLsyXXHHYz/l48vzgVObo8AAAB/zP+XgDP/6YACQAVQAtgCQEJgBADIAMCAwAvXRrMXTEwAzY2NzMVBgYHI801bSHZLLo/d/53RKw/FUDCMQAAAf9M/kIAtP/HAAcAF7MHAAAFuP/AtAoNSAUCAC/NKzkvMzEwBzM1MxEjNSO04YeH4b6F/nuFAAAB/0z+QgC0/8cABwAXswEGBgO4/8C0Cg1IAwQAL80rOS8zMTATIxUjETMVM7Thh4fh/seFAYWFAAH+kwTRAW0GagAFABNACgUADwNfA/8DAwMAL13EMjEwASERIxEh/pMC2oj9rgZq/mcBHwAB/y0EcQDdBhQACAAMswMDAAgALzMzLzEwAzY2NTMXBgYH03Bzvw4izsAE8A+Wfxa7vBYAAf+T/jMAc//jAA0AErcNIAABAAAHBgAvMzMvXTIxMBMiJjU0NjMVIgYVFBYzc2d5eGgyQTs4/jN2Y2F2Zjs2NjoAAAAB/yn+VgDX/54ABwAZQAwBBQUABAEEBBAHAQcAL10zL10zETMxMBcVMxUhNTM1RJP+UpNizXt7zQAAAAAB/yn+VgDX/54ABwAZQAwAAAEABgIAAhADAQMAL10zMxEzL10xMAM1IzUhFSMVRJMBrpP+Vs17e80AAAAB/yn+LQDX/8cACwAtQBu/CgEAChAKAhAKIAoCCs8DAQMKCAADBR8BAQEAL3EzMzMyMi9dL11xXTEwAzUzNTMVMxUjFSM115OIk5OI/rx7kJB7j48AAAD///7S/tQBMv9lAQcBTf2n+fsAEUALAAAAIABwAOAABAAAEV01AAAAAAH+lv4ZAAAAVgALA
A+1CAAFa1kAAC8rABgvMTADIic1FjMyNREzERDwPjwuOGKi/hkYlhNrATf+0/7wAAEAAP4ZAWoAVgALAA21AAdrWQADAC8vKzEwEyIRETMRFDMyNxUG8PCiYjguPP4ZARABLf7JaxOWGAAA///+4P6aASD/ZQEHAGr9r/mOABdADwEALwkBAAk/CVAJjwkECQARXXE1NQD///8g/jAA4v/gAQcBUP2z+VcAErIBAAm4/8C0DjJICRMAPys1NQAA////fv47AIT/gwAHAjn+DwAAAAD///85/hQAzgAAAAcAev8cAAAAAP///1n+PQCrAAAABwFR/zYAAAAA////rv5gAFL/rwEHBO4AAPmbABy0AOADAQO4/8CzEhNIA7j/wLMNEEgDABErK101AAH+k/5CAW3/ngAHABC2AwcFLwABAAAvXTIvMzEwBSERIzUhFSP+kwLaiP42iGL+pOHhAAAB/n3+lgGF/4MAFgAmtBMNDQMHuP/AQA4JDEgHChYHAw8QHxACEAAvXRczLyszMxEzMTAFFAYjIicGIyImNTMUMzI2NTMUMzI2NQGFd2pvNTVvaHdvcDNAYnMzQH1xfEdHenOHQkWHQkUAAP///qf+aQFb/7EBBwFM/aP5kAAdtADQCgEKuP/AsxASSAq4/8C0Cg9ICiMAPysrXTUAAAD///6n/mcBW/+vAQcBS/2j+Y4AHbQA0A0BDbj/wLMPEkgNuP/AtAoOSA0jAD8rK101AAAA///+wv6GAT//mQEHAU79o/mtABe3AA8AAZAAAQC4/8CzCQ5IAAARK11xNQD///7B/oQBPf+XAQcE8QAA+asAF7cADwABkAABALj/wLMJDkgAABErXXE1AP///oj+iAF4/5ABBwFS/Yb5rwAaQAsADwAfAC8ArwAEALj/wLMJDkgAABErcTUAAP///tL+1AEy/2UBBwFN/af5+wARQAsAAAAgAHAA4AAEAAARXTUAAAAAAf41/sUBy/9IAAMACLEBAgAvMzEwASE1IQHL/GoDlv7FgwD///5Z/jkBq//LAAcCBf5dAAAAAAAB/qwBpgFOAq4AFwAfQBEUDAkRAAAQACAAoAAEAAAFDAAvMzMvXTLEEMYxMBMiLgIjIgYHIzY2MzIeAjMyNjczBgaJJkhFQR4sKg1oC2VVKEtFPx0qKg5nC2QBqCUrJTs8eowlKyU7PHiOAAAB/pMB/gFvAo0AAwAIsQMAAC8yMTABIRUh/pMC3P0kAo2PAAAB/R8B/gLhAo8AAwAIsQECAC8zMTABITUhAuH6PgXCAf6RAAAB/tcBdwErAysAAwAIsQMBAC/NMTABARcB/tcCCEz99gHyATl9/skAAf5q/4kBlgYQAAMACbICAwAAPy8xMAEBIwEBlv1wnAKQBhD5eQaHAAH/k/4zAHP/4wANABK3ByAGAQYGDQAALzIyL10zMTAHMhYVFAYjNTI2NTQmI21oeHhoMkE7OB11ZGJ1Zzo2NjsAAAAAAf6T/kIBbf+eAAcAFUAKBG8BzwECAQEGAgAvMzMvXTMxMAEhETMVITUzAW39JogByoj+QgFc4uIAAAAAAv81/i0Az//HAAMABwAXQAwEEAEgATABAwEBBwIALzMzL10zMTATIREhATM1I8/+ZgGa/tm0tP4tAZr+08AAAAH+ff6WAYX/gwAWACSyCwARuP/AQA4JDEgRDRMTBw8DHwMCAwAvXTMzETPEKzIyMTABNDYzMhc2MzIWFSM0IyIGFSM0IyIGFf59d2pvNTZuaHducTNAYnMzP/6WcXxISHpzh0JFh0JFAAAAAAH/LwSRANEGMwALACJAGW8LnwsCDwsfCy8LTwtfC38LzwvfC+8LCQsAGS9dcTEwAzcXNxcHFwcnByc30VZ7eVh7e1h5e1Z5BdtYe3tYeXtWeXlWewAAAAAB/30EnACDBu4AFwAbQBMPDwNPA18DfwOvA78DzwPvAwgDAC9dxDEwAzQ2MxUiBhUUHgIVFAYjNTI2NTQuAoGScjw7JSslkHQ8OyUrJQZGS11nLhwVNT5HKExeaCwcGTg+RQD///5XBhwBqQeuAQcCBf5bB+MAFUAOAQAvBj8GbwZ/Bu8GBQYAEV01NQAAAP///rcE2QBTBiEABwBD/TQAAAAA////rwTZAUsGIQAHAHb+LAAAAAD///6IBNkBeAXhAAcBUv2GAAAAAP///34EbgCEBbYBBwI5/g8GMwAHsgADAwA/NQD///7BBNABPgZ4AQcBVf2x/8QAEUAJAgEAAkASG0gCABErNTU1AAAA////sf49AND/gQAHB5f9ygAAAAAAAf5CBLwBvgYZAAcAFUAKBWAAcAACAAADBwAvMzMvXTIxMAEhESM1IRUj/kIDfIf9kocGGf6j4uIAAAD///5X/jkBqf/LAAcCBf5bAAAAAP///wr+YAD2/68BBwTvAAD5mwAetQEA4AMBA7j/wLMSE0gDuP/Asw0QSAMAESsrXTU1AAAAAf9C/hQAvv+FAAUAF0AMXwMBAwMFDwAfAAIAAC9dMjIvXTEwByERIzUjvgF8h/V7/o/2AAH+hwSNAXcGLQAbADxAJAIFAAcTEBUOCxgHERgDDw4fDi8Orw4EDhUOCwMEoAABDwABAAAvXV0XMi9dFzMvLxESOTkREjk5MTATIicHJzcmIyIGByM2NjMyFzcXBxYzMjY3MwYGmDlbTFxKLRkxMQ5pDXNhOkxFXEMwJDAxD2cMdgTbL304eBM7PHqMJ3U3cxk7PHuLAAAAA/6HBJ4BdwdkABcAIwAvAEBALC0nGyEJFAUUJwMPDB8MLwyvDAQMEQwJIQSfAAFfAG8AfwCfAM8A7wD/AAcAAC9dcRcyL10XMy8vLzMvMzEwEyIuAiMiBgcjNjYzMh4CMzI2NzMGBgU0NjMyFhUUBiMiJhM0NjMyFhUUBiMiJpgrU09JIjExDmkNc2EtVU5IIDAxD2cNcf6nOCguMjomKDgCOCYuMjomJjgFfyUrJTs8eowlKyU7PHiOezYuNi41MTECMTYuNi41MTEAAv6oBNcBWAbhABcALwBKQC8hLCwd3yQBJEAJDUgkKSEkAw8YHxgCGBQJFAUMQAkNSAwRCQwYBKAAAQ8AXwACAAAvXV0XMi8rMzMvLy9dFzMvK10zMy8vMTATIi4CIyIGByM2NjMyHgIzMjY3MwYGAyIuAiMiBgcjNjYzMh4CMzI2NzMGBpEnTUlGHyYqD2gKaFUqUElDHismDmYLZVcnTUlGHyYqD2gKZFkqUElDHismDmYKZgTZHyQfLDhweh8lHzYvcHoBHx8kHyw4a34fJB82Lm57AAAAAf6a/j8BZv+4AAkAErcGCQMDAQQDAgAvFzMRMzMxMAElFTM1BQU1IxX+mgEAzAEA/wDM/vy8f3+8vX9/AAAAAAH/Rv4UAL7/zQAGABK3BQICIAABAAMAL81dOS8zMTADNSM3FyMVQni8vHj+FPbDw/YAAAAY/SUAAALbBbYABQAJAA0AEwAZAB0AIQAnAC8ANwBBAEkAUwBdAGcAcQB5AIMAjACWAJ4AqACwALoA
z0B3DBwSCxsmGCQ2Mg8yAT8yTzJfMgOmuLihsz+zT7MCW29vVmp2fn5yekI4OEY8iJGRhI0QjSCNAlFlZUxgARGdr6+ZqxCrIKsCLiowKkAq4CoDJDKzano8jWARqyoqqxFgjTx6arMyJAsSFiAmJh8VIwMHDw8IBBIALzMzMxEzMy8zMzMRMzMSFzkvLy8vLy8vLy8vL10RM3ERMzMRMxEzETMzETNxETMzETMRMzMRMxEzMxEzETMzETNdETMzETNdcREzETMQxDIQxjIxMAEjNSM1IQUhNSEBIxEzASMVIxEhASE1MzUzJSMRMwEhNSEFIREzFTMBNDMyFRQjIhE0MzIVFCMiASI1NDMyFhUUBiEiNTQzMhUUAzQzMhUUBiMiJhE0MzIVFAYjIiYBNDMyFRQGIyImETQzMhUUBiMiJiUyFRQjIjU0ITIVFCMiJjU0NgEyFRQjIjU0NiEyFRQjIiY1NDYlNDMyFRQjIhE0MzIVFAYjIiYBNDMyFRQjIhE0MzIVFAYjIiYC22zTAT/9x/68AUQCOWxs+4nRbgE/BHf+wdNs+rhubgMP/rwBRP3C/sFu0QFlNzc3Nzc3Nzf+eTg4GxwcA2w4ODf2ODcfGBkfODcfGBkf/X03OB8ZGB83OB8ZGB8DGzc3OPz8ODgbHBwDVzc3OBz84Dg4GxwcAi43Nzc3NzceGRke/qA3Nzc3NzceGRkeBHfRbm5u/IUBQgHL0QE/+kpv0/kBQvyDb28BQtMEKzc3OPy7Nzc4Ab83Nx4ZGR43Nzc3AXc3NxwcHP2dNzccHBwCmzc3HBwc/Z03NxwcHOI3Nzc3NzceGRkeAWE4NzcZHzg3HhkZH7Y3Nzf8+zg4GxwcA1c3Nzf8+zg4GxwcAAAAAf9UBLgApAZSAAwACrIMwAYALxrOMTADFhcVBgcjNTY3Jic1luBaesAWKZNpUwZScBmMHmdpHEgzNGb///8hBMMAAQZzAAYErY4pAAL+wQTZAT0GuAALABkAMUAgEAAXEBcgFwMXFxMPDB8MXwyvDAQMDAOgCQEPCV8JAgkAL11dMzMvXTMzL10zMTADNDYzMhYVFAYjIiYTMhYXIyYmIyIGByM2Nmo9LTA4Oi4tPW6QoAluCVVxYmQJcAusBUw8Nj01Nj04AaeLh0k4PUSAkgAAAAAB/z3+NwDD/7wACwAHsAsAGS8xMAc3FzcXBxcHJwcnN8NWaWRhZWdWaWRhZZpWZmRgZGlWZ2VhZAAAAAAB/1T+FACk/64ADAAXQA4QACAAMAADAE8FXwUCBQAvXcRdMTATJic1NjczFQYHFhcVjdpfeMEXLJF1SP4UbR2LHmdoHkc6LGcAAAAAAf9U/hQApP+uAA0AF0AOEAcgBzAHAwdPDV8NAg0AL13GXTEwBxYXFQYGByM1NjcmJzWW02c0sFYWKZNpU1JqH4sNSy5pHEgzNGYAAAL+h/4UAY//rgAPAB0AI0AUCwMJDxYDEBYgFjAWAxZPEF8QAhAAL13EXcYQxjIROTEwAzY2NzMWFhcVIyYnBwYHIwEWFxUGBgcjNTY3Jic1DB1ZFYsVUh5oLDoYLCFo/qrVZC+rXxcnlm5P/jM55ENU2jIYPag8czYBk2ofiwxIMmkaSjYxZv//AAAEwwDgBnMABgSsbSn//wEGBQAB2QXlAAYBT2YAAAH/J/4UANn/sgAOAB5AEw4CCwUIBwYQCSAJMAkDCU8AAQAAL13EXRc5MTAHMwc3FwcXBycHJzcnNxdIkBt/LY9ocz8/c2iPLX9Oj0WHFGNWhIRWYxSHRQAD/nf+FAGH/8UAEwAfACsALkAZDAIOFykpChAOAQ4cHSMjDgRgAHAAgAADAAAvXTIyMhEzP10zMxEzETk5MTAHMhc2MzIWFRQGIyInBiMiJjU0NgUUFjMyNjU0JiMiBgc0JiMiBhUUFjMyNqpzNzVzZ3h4Z3M1OXFneHgBRkAzODtCMTJBbEEyMkE7ODNAO1BQdWJldVJSdWViddc2PT02Njw8NjY8PDY2PT0AAf89BMUAwwZEAAcAPrNAAgECuP/AQCUJDEgCzwXfBQIFAgQFBE8HAQ8HHwcvB08HXwd/B88H7wf/BwkHAC9dcTMzETMvXS8rcTEwEwcjNyM3MwfDRmkl/EZpJQW69Yn2igAAAAH/VP4pBX//qgAMABdADAlQA2ADoAMDAwMGAAAvMjIvXTMxMAEgJCczFiEyJDczBgQCcf7R/mdVj5wB8OgBSk+PYf5y/inGu+56dL7DAAD///9UBLIFfwYzAQcFOwAABokAFUAPAA8ALwBfAH8AvwDPAAYAABFdNQAAAAAB/0IE1wQOBWgAAwAZQBEBDwIvAl8CfwKPAp8CzwIHAgAvXTMxMAEhNSEEDvs0BMwE15H///9C/tIEDv9jAQcFPQAA+fsAD0AJAAACIAJwAgMCABFdNQAAAf9UBNsFfwXjABQALUAbBwATCxAPAB8ALwCvAAQAAAVgCwGgCwEPCwELAC9dXXIzMy9dMhDEEMYxMBMyHgIzMjczBgYjIi4CIyIHIxK+c9HN1HbfH2gWtZ11083Tdd8faC4F4SYvJn2BhSYvJn0BBgAAAAAB/1QEsgV/BjMADAAvQCIGUABgAKAAAwAABB8KPwpPCm8KBA8KLwpfCn8Kzwr/CgYKAC9dcTMzL10yMTABIAQXIyYhIgQHIzYkAmIBLwGZVY+c/hDo/rZPj2EBjgYzxrvuenS+wwAAAAH/VP4/BX//uAAGAA60BAYCBgAALzIyETMxMAchNQUFNSGsBSsBAP8A+tXHf7y9fwAAAAAC/t0CKQElBOkAGAAjACZAFQEEHRkKKQo5CgMKChQAWA4UWxkEWQA/Mz8zPxI5L3EzETkxMBMnBgYjIiY1NCU3NTQmIyIGByc2MzIWFRElMjY1NQcGBhUUFrIQNWZCb3kBVmA/OTJnMCt3g4OB/rhSZFFsYzwCNWVBMG5h0QwEG0s8JBVoP255/jNjWlYvBAQ6PzAuAAAAAAL+zwIpATME7AATABoAL0AeCzsXAcgX2BcCHxcvFwIXQAoNSA4XARcXFAZbDQBZAD8yPzM5L10rcV1xMzEwEyImNTQ2MzIWFRUhFjMyNjcVBgYDIgYHISYmJaK0qZKKn/4xDrk0ZFE4bmZEUQsBNAVHAim6oqLFqI5OyhYhfRkWAlJXTUpaAAAC/64CNQBSBdUAAwAPABlADg2fB68HvwcDB4ACWgFYAD8/GsxdMjEwEyMRMyc0NjMyFhUUBiMiJkiSkpoxIyAwMCAjMQI1AqqkKycnKygoKAAAAAAC/rYCKQFKBOwACwAVAA61FAlbDgNZAD8zPzMxMAEUBiMiJjU0NjMyFgUUMzI2NTQmIyIBSq+dl7GympS0/gK0XVdYXLQDi6S+waGnur2k739wb38AAAAAAf7RAikBLwTfABIAFkAKCw4HEloKWAMOWQA/Mz8/MxI5MTADERQzMjY1ETMRIycGBiMiJjURnINeWZF9DCZrPIWDBN/+TpFsdQFi/VZjMj13gwG8AAAAAAH++AIpAQoE7AAUAA61CwZ
[base64-encoded binary payload elided — the remainder of the chunk at this point is an uninterrupted base64 blob, apparently embedded font/glyph table data rather than executable logic; nothing human-readable is recoverable from this span]
v6Z/5sAGLUDAhsEAwK4/3S0LS0GBiUBKzU1AD81Nf//AAH/7Ac2Bc0AJwAyAXMAAAEHB7P+mf+bABi1AwIkBAMCuP90tC0tBgYlASs1NQA/NTX//wCi/+wEeQYxAiYBkgAAAQcHlgF7AAAADrkAAf/wtB8bBBIlASs1//8Aov/sBHkGMQImAZIAAAEHB9ABZAAAAA65AAH/5bQeGwQSJQErNf//AKL/7AR5BjECJgGSAAABBgelIwAAELECAbj/5rQnHAQSJQErNTX//wCi/+wEeQYxAiYBkgAAAQYHsjsAABCxAgG4/+W0JhwEEiUBKzU1//8Aov/sBHkGMQImAZIAAAEGB6YfAAANtwIBEB8oBBIlASs1NQAAAP//AKL/7AR5BjECJgGSAAABBgezCgAAELECAbj//LQeKAQSJQErNTX//wCi/+wEeQbhAiYBkgAAAQYHpzMAAA23AgEFHioEEiUBKzU1AAAA//8Aov/sBHkG4QImAZIAAAEGB7QUAAAQsQIBuP/mtB4qBBIlASs1Nf//AAEAAAW8BcwAJwA8ATUAAAEHB9D/Y/+bABSzARUEAbj/n7QJCQcHJQErNQA/Nf//AAEAAAbGBcwAJwA8Aj8AAAEHB7L+1v+bABi1AgEdBAIBuP9+tA4OBwclASs1NQA/NTX//wABAAAG2wXMACcAPAJUAAABBwez/pn/mwAYtQIBFQQCAbj/27QcHAcHJQErNTUAPzU1////zgAABxYGfAAnADwCjwAAAQcHtP7c/5sAH0AUAgEQCW8JfwmvCQQJAgEaHh4HByUBKzU1ABFdNTUA//8Ac//sBc8GMQImAZYAAAEHB5YCHwAAAAu2AQAyLgMgJQErNQAAAP//AHP/7AXPBjECJgGWAAABBwfQAhIAAAALtgEAMS4DICUBKzUAAAD//wBz/+wFzwYxAiYBlgAAAQcHpQDPAAAAELECAbj//7Q6LwMgJQErNTUAAP//AHP/7AXPBjECJgGWAAABBweyAOkAAAANtwIBADkvAyAlASs1NQD//wBz/+wFzwYxAiYBlgAAAQcHpgDLAAAADbcCASkyOwMgJQErNTUA//8Ac//sBc8GMQImAZYAAAEHB7MAtgAAAA23AgEUMTsDICUBKzU1AP//AHP/7AXPBuECJgGWAAABBwenAMEAAAANtwIBADE9AyAlASs1NQD//wBz/+wFzwbhAiYBlgAAAQcHtADBAAAADbcCAQAxPQMgJQErNTUA//8AAQAABmsFzQAmAXZ1AAEHB5b/cP+bABSzASMEAbj/67QmJg0NJQErNQA/NQAA//8AAQAABocFzQAnAXYAkQAAAQcH0P9j/5sAFLMBLAQBuP/AtCAgDQ0lASs1AD81//8AAQAAB6YFzQAnAXYBsAAAAQcHpf7C/5sAGLUCASsEAgG4//G0JycNDSUBKzU1AD81Nf//AAEAAAemBc0AJwF2AbAAAAEHB7L+1v+bABi1AgE0BAIBuP/ntCcnDQ0lASs1NQA/NTX//wABAAAHcwXNACcBdgF9AAABBwem/pn/mwAYtQIBIwQCAbj/ZLQ1NQ0NJQErNTUAPzU1//8AAQAAB30FzQAnAXYBhwAAAQcHs/6Z/5sAGLUCASwEAgG4/1q0NTUNDSUBKzU1AD81Nf///84AAAdnBnwAJwF2AXEAAAEHB6f+3P+bACJADgIBECBvIH8gryAEIAIBuP+ttD09DQ0lASs1NQARXTU1AAD////OAAAHZwZ8ACcBdgFxAAABBwe0/tz/mwAiQA4CARAgbyB/IK8gBCACAbj/prQ4OA0NJQErNTUAEV01NQAA//8Acf/sBM0GHQImAX4AAAEGB8QAAAAOuQAC/7O0MC0PGSUBKzUAAP//AHH/7ATNBh0CJgF+AAABBgfP/QAAC7YCBjQxDxklASs1AP//AFj/7AOYBh0CJgGCAAABBgfEzAAADrkAAf/ktCkmFx0lASs1AAD//wBY/+wDmAYdAiYBggAAAQYHz8gAAAu2ATYtKhcdJQErNQD//wCu/hQETAYdAiYBhAAAAQYHxCsAAA65AAH/0rQZHAoUJQErNQAA//8Arv4UBEwGHQImAYQAAAEGB89IAAALtgFEHBkKFCUBKzUA//8ARP/sAqAGHQImAYYAAAEHB8T+sQAAAA65AAH/07QTEA8AJQErNf//AKD/7AKgBh0CJgGGAAABBwfP/rcAAAALtgEvFxQPACUBKzUAAAD//wBx/+wEaAYdAiYAUgAAAQYHxBQAAA65AAL/y7QcHwcAJQErNQAA//8Acf/sBGgGHQImAFIAAAEGB88lAAALtgIyHxwHACUBKzUA//8Aov/sBHkGHQImAZIAAAEGB8QSAAAOuQAB/6i0GRwEEiUBKzUAAP//AKL/7AR5Bh0CJgGSAAABBgfPLQAAC7YBGRwZBBIlASs1AP//AHP/7AXPBh0CJgGWAAABBwfEAM8AAAAOuQAB/9K0LC8DICUBKzX//wBz/+wFzwYdAiYBlgAAAQcHzwDVAAAAC7YBLS8sAyAlASs1AAAA//8Acf49BM0GMQImAX4AAAAnB5YBcwAAAQYHlwoAABe5AAP/6EAMSTwPHSUCDC0zDxklKzUrNQD//wBx/j0EzQYxAiYBfgAAACcH0AFIAAABBgeXCgAAGbkAA//otUk8DxwlArj/+bQ2LQ8cJSs1KzUAAAD//wBx/j0EzQYxAiYBfgAAACYHpQYAAQYHlwoAABu5AAT/6LZRRA8cJQMCuP/utDUtDxklKzU1KzUAAAD//wBx/j0EzQYxAiYBfgAAACYHshQAAQYHlwoAABu5AAT/6LZRRA8cJQMCuP/btD4tDxklKzU1KzUAAAD//wBx/j0EzQYxAiYBfgAAACYHpgwAAQYHlwoAABm5AAT/6EANUUQPHCUDAiItQA8ZJSs1NSs1AP//AHH+PQTNBjECJgF+AAAAJgez+QABBgeXCgAAGbkABP/oQA1RRA8cJQMCCDZADxklKzU1KzUA//8Acf49BM0G4QImAX4AAAAmB6cXAAEGB5cKAAAZuQAE/+hADV9SDx0lAwIGNkIPGSUrNTUrNQD//wBx/j0EzQbhAiYBfgAAACYHtPkAAQYHlwoAABu5AAT/6LZeUQ8dJQMCuP/otDZCDxklKzU1KzUAAAD//wAA/+wHvQXMACYHloqbACYAJAAAAQcBhgUdAAAAH7MAAwQDuP+gQA0sHRYuJQIBihMUAAYlKzU1KzUAPzUA//8AAP/sB70FzAAmB9CKmwAmACQAAAEHAYYFHQAAAB+zAAwEA7j/oEANLB0WLiUCAYwTFAkAJSs1NSs1AD81AP//AAH/7AiMBcwAJwel/sL/mwAnACQAzwAAAQcBhgXsAAAAIbQBAAoEBLj/oEANNCUeNiUDAuYcHAAAJSs1NSs1AD81NQAAAP//AAH/7AigBcwAJwey/tb/mwAnACQA4wAAAQcBhgYAAAAAI7QBABQEBLj/oLY0JR42JQMCuAEEtBwcAAAlKzU1KzUAPzU1AP//AAH/7AhjBcwAJwem/pn/mwAn
ACQApgAAAQcBhgXDAAAAIbQBAAMEBLj/oEANNCUeNiUDArIcHBMTJSs1NSs1AD81NQAAAP//AAH/7AhjBcwAJwez/pn/mwAnACQApgAAAQcBhgXDAAAAIbQBAAwEBLj/oEANNCUeNiUDArIcHBMTJSs1NSs1AD81NQAAAP///87/7AhrBnwAJwen/tz/mwAnACQArgAAAQcBhgXLAAAAK0ANAQAQAG8AfwCvAAQABLj/oEANQjMsRCUDAkAqKhUVJSs1NSs1ABFdNTUA////zv/sCGsGfAAnB7T+3P+bACcAJACuAAABBwGGBcsAAAArQA0BABAAbwB/AK8ABAAEuP+gQA1BMitDJQMCQCkpFRUlKzU1KzUAEV01NQD//wCu/hQETAYxAiYBhAAAACcHlgG0AAABBweX/tQAAAAUQA4CADEkCQglAUAVGwoTJSs1KzUAAP//AK7+FARMBjECJgGEAAAAJwfQAX8AAAEHB5f+1AAAABRADgIAMSQJCCUBCR4VChMlKzUrNQAA//8Arv4UBEwGMQImAYQAAAAmB6UnAAEHB5f+1AAAABZADwMAOSwJCCUCAQIdFQoTJSs1NSs1AAD//wCu/hQETAYxAiYBhAAAACYHskIAAQcHl/7UAAAAGUAJAwA5LAkIJQIBuP/8tCYVChMlKzU1KzUAAAD//wCu/hQETAYxAiYBhAAAACYHpjcAAQcHl/7UAAAAFkAPAwA5LAkIJQIBQBUoChMlKzU1KzUAAP//AK7+FARMBjECJgGEAAAAJgezJQABBweX/tQAAAAWQA8DADksCQglAgEnHigKEyUrNTUrNQAA//8Arv4UBEwG4QImAYQAAAAmB6dCAAEHB5f+1AAAABZADwMARzoJCCUCASQeKgoTJSs1NSs1AAD//wCu/hQETAbhAiYBhAAAACYHtEIAAQcHl/7UAAAAFkAPAwBGOQkIJQIBJB4qChMlKzU1KzUAAP//AAH/7AlABcwAJwArALIAAAAnB5b/cP+bAQcBhgagAAAAHUALAQ8EAgQqGwAsJQG4/2q0EhIGBiUrNSs1AD81AAAA//8AAf/sCTMFzAAnACsApgAAACcH0P9j/5sBBwGGBpMAAAAdQAsBGAQCAyobACwlAbj/Z7QMDAYGJSs1KzUAPzUAAAD//wAB/+wKcQXMACcAKwHjAAAAJwel/sL/mwEHAYYH0QAAACFADQIBFwQDAzIjADQlAgG4/3q0DAwGBiUrNTUrNQA/NTUAAAD//wAB/+wKZwXMACcAKwHZAAAAJwey/tb/mwEHAYYHxwAAACFADQIBIAQDBDIjADQlAgG4/3q0DAwGBiUrNTUrNQA/NTUAAAD//wAB/+wKUgXMACcAKwHFAAAAJwem/pn/mwEHAYYHsgAAACFADQIBDwQDAzIjADQlAgG4/6O0Hx8GBiUrNTUrNQA/NTUAAAD//wAB/+wKUgXMACcAKwHFAAAAJwez/pn/mwEHAYYHsgAAACFADQIBGAQDAzIjADQlAgG4/6O0Hx8GBiUrNTUrNQA/NTUAAAD////O/+wKeQZ8ACcAKwHsAAAAJwen/tz/mwEHAYYH2QAAACpAFQIBEAxvDH8MrwwEDAMDQDEAQiUCAbj/9rQhIQYGJSs1NSs1ABFdNTUAAP///87/7AplBnwAJwArAdcAAAAnB7T+3P+bAQcBhgfFAAAAJ0AbAgEQDG8MfwyvDAQMAwQ/MABBJQIBCyEhBgYlKzU1KzUAEV01NQD//wBz/j0FzwYxAiYBlgAAACcHlgIfAAABBweXAOMAAAAXuQAC//ZADEQ3ERIlAQcoLgMgJSs1KzUAAAD//wBz/j0FzwYxAiYBlgAAACcH0AISAAABBweXAOMAAAAZuQAC//a1RDcREiUBuP/4tDEoAyAlKzUrNQD//wBz/j0FzwYxAiYBlgAAACcHpQDPAAABBweXAOMAAAAZuQAD//ZADUw/ERIlAgEGMCgDICUrNTUrNQD//wBz/j0FzwYxAiYBlgAAACcHsgDpAAABBweXAOMAAAAbuQAD//a2TD8REiUCAbj//7Q5KAMgJSs1NSs1AAAA//8Ac/49Bc8GMQImAZYAAAAnB6YAywAAAQcHlwDjAAAAGbkAA//2QA1MPxESJQIBMCg7AyAlKzU1KzUA//8Ac/49Bc8GMQImAZYAAAAnB7MAtgAAAQcHlwDjAAAAGbkAA//2QA1MPxESJQIBFDE7AyAlKzU1KzUA//8Ac/49Bc8G4QImAZYAAAAnB6cAwQAAAQcHlwDjAAAAG7kAA//2tlpNERIlAgG4//+0MT0DICUrNTUrNQAAAP//AHP+PQXPBuECJgGWAAAAJwe0AMEAAAEHB5cA4wAAABu5AAP/9rZZTBESJQIBuP//tDE9AyAlKzU1KzUAAAD//wAB/+wJVgXNACYBdnUAACcHlv9w/5sBBwGGBrYAAAAfswEjBAK4/9+1PTATQCUBuP/rtCYmDQ0lKzUrNQA/NQAAAP//AAH/7AlzBc0AJwF2AJEAAAAnB9D/Y/+bAQcBhgbTAAAAH7MBLAQCuP/ftT4vE0AlAbj/wLQgIA0NJSs1KzUAPzUA//8AAf/sCpIFzQAnAXYBsAAAACcHpf7C/5sBBwGGB/IAAAAjtAIBKwQDuP/gtkY3E0glAgG4//G0JycNDSUrNTUrNQA/NTUA//8AAf/sCpIFzQAnAXYBsAAAACcHsv7W/5sBBwGGB/IAAAAjtAIBNAQDuP/gtkY3E0glAgG4/+e0ICANDSUrNTUrNQA/NTUA//8AAf/sCl4FzQAnAXYBfQAAACcHpv6Z/5sBBwGGB74AAAAjtAIBIwQDuP/ftkY3E0glAgG4/2S0NTUNDSUrNTUrNQA/NTUA//8AAf/sCmkFzQAnAXYBhwAAACcHs/6Z/5sBBwGGB8kAAAAhtAIBLAQDuP/gQA1GNxNIJQIBJTMzDQ0lKzU1KzUAPzU1AAAA////zv/sClIGfAAnAXYBcQAAACcHp/7c/5sBBwGGB7IAAAAtQA0CARAgbyB/IK8gBCADuP/ftlRFE1YlAgG4/620PT0NDSUrNTUrNQARXTU1AAAA////zv/sClIGfAAnAXYBcQAAACcHtP7c/5sBBwGGB7IAAAAtQA0CARAgbyB/IK8gBCADuP/ftlNEE1UlAgG4/6a0ODgNDSUrNTUrNQARXTU1AAAA//8Acf/sBM0F7AImAX4AAAEGAU4OAAAOuQAC//u0MDgPGSUBKzUAAP//AHH/7ATNBWoCJgF+AAABBgFN/QAADrkAAv/otC0uDxklASs1AAD//wBx/j0EzQYdAiYBfgAAACYHxAAAAQYHlwoAABm5AAP/6LVCNQ8dJQK4/7O0MC0PGSUrNSs1AP//AHH+PQTNBF4CJgF+AAABBgeXCgAADrkAAv/otDotDx0lASs1AAD//wBx/j0EzQYdAiYBfgAAACYHz/0AAQYHlwoAABe5AAP/6EAMQjUPHSUCBjQxDxklKzUrNQAAAP//AHH/7ATNBeECJgF+AAABBgFS6AAADrkAAv/ytDZCDxk
lASs1AAD//wBx/j0EzQXhAiYBfgAAACYBUugAAQYHlwoAABm5AAP/6LVSRQ8dJQK4//K0NkIPGSUrNSs1AP//AAAAAAUbBz4CJgAkAAABBwFOAC8BUgATQAsCABEZBQYlAg4FJgArNQErNQAAAP//AAAAAAUbBrwCJgAkAAABBwFNAEIBUgAdQBQCDxEQBQYlAn8RjxGfEa8RBBEFJgArXTUBKzUA//8AAAAABRsFvAImACQAAAEHB8T+l/+bABSzAhMDArj/ELQODgUFJQErNQA/NQAA//8AAAAABRsFvAImACQAAAEHB8/+Wf+bABSzAhADArj/KLQSEgUFJQErNQA/NQAA//8AAP/sB70FvAAmACQAAAEHAYYFHQAAAA65AAL/oLQdDgcfJQErNQABAJEExQFzBjEADgA6QBYGDAwACQMPEAwQAyADMAMDsAPAAwIDuP/AQAwfIkgDDwlfCf8JAwkAL13EK11xMhESARc5ETMxMBM0NjMyFhUUBgc1NjUiJqA9LTI3anh5LT0FzzQuRDVtegxLClUuAAAAAQHn/j0DBv+BAA0ALUAbBwEBDAwODwkEYFkQCSAJYAmgCbAJBQkPDQENAC9dL10rERIBOREzETMxMAUVFBYzMjcVBiMiJjU1AoEeJBcsNkdKWH9vLSkLdxNeZYEAAAD//wCRBFkBcwXFAQYHlgCUAAeyAAMDAD81AAAA//8A6ATZA9gF4QAGAVLmAAADAPIE7gPNBuEAFwAjAC8AXUA7HhgqJBUkGAkEMDEUBb8MzwzfDAMMQAkNSAwRCQwDHwABHwAvAAIAQBATSAAAJxsbLe8hASAhAaAhASEAL11xXTMzETMzLytdcRcyLytdMzMREgEXOREzETMxMAEiLgIjIgYHIzY2MzIeAjMyNjczBgYFNDYzMhYVFAYjIiYlNDYzMhYVFAYjIiYC+CtTTkkiMjEOXgxqYS1VTkcgLzIQXA1t/d44KCc6OicoOAGBOCYnOjonJjgF+B8kHzYubH0fJB82LnF4pDYuLjY1MTE1Ni4uNjUxMQAA//8Arv4UBEwGHQImAYQAAAAmB8QrAAEHB5f+1AAAABa3AgAqHQkIJQG4/9G0GBUKEyUrNSs1AAD//wCu/hQETAReAiYBhAAAAQcHl/7UAAAAC7YBACIVCQglASs1AAAA//8Arv4UBEwGHQImAYQAAAAmB89IAAEHB5f+1AAAABRADgIAKh0JCCUBRBwZChMlKzUrNf//AK7+FARMBeECJgGEAAABBgFSEgAAC7YBDx4qChMlASs1AP//AK7+FARMBeECJgGEAAAAJgFSEgABBweX/tQAAAAUQA4CADotCQglAQ8eKgoTJSs1KzX////NAAAEpgW4ACcAKACuAAABBwfE/jr/mwAUswEQAwG4/3m0ExMCAiUBKzUAPzX////NAAAEmAW4ACcAKACgAAABBwfP/eT/mwAUswEOAwG4/4e0EBACAiUBKzUAPzX////NAAAF0wW4ACcAKwCuAAABBwfE/jr/mwAUswEQAwG4/3m0DAwGBiUBKzUAPzX////NAAAFxQW4ACcAKwCgAAABBwfP/eT/mwAUswEOAwG4/4e0EBAGBiUBKzUAPzX//wDH/+wIjgW2ACYAKwAAAQcBhgXuAAAAC7YBBBsMAB0lASs1AAAAAAIBPwTFA2IGMQAHABYANEAeDhQAAxQIEQUXGBQLBBFvBN8EAgSAoAEBDwFfAQIBAC9dXRrNXcYQxDIREgEXOREzMTABIyYnNTMWFyU0NjMyFhUUBgc1NjUiJgNiVoVFxBhE/ew9LTI3anh5LT0E2aWKFWrB3TQuRDVtegxLClUuAAAAAAIBaATFA5YGMQAOABYANEAeBgwTFgwACQUXGAwDEQlvEd8RAhGAoBYBDxZfFgIWAC9dXRrMXcYQxDIREgEXOREzMTABNDYzMhYVFAYHNTY1IiYXNjczFQYHIwF3PS0yN2p4eS09/kQYxVB7VgXPNC5ENW16DEsKVS6rwWoVlpkAAgDyBLADzQbhABcAJABGQC4dIxUjGCAJBSUmFAW/DM8M3wwDDEAJDUgMEQkMAwAbgA8gLyBfIH8gzyDvIAYgAC9dGtzGFzIvK10zMxESARc5ETMxMAEiLgIjIgYHIzY2MzIeAjMyNjczBgYFNDYzMhUUBgc1NjUiAvgrU05JIjIxDl4MamEtVU5HIC8yEFwNbf6fNjRpcXF5agX4HyQfNi5sfR8kHzYucXhzKi5qXV0JRAU2AAAA////1P/sAqAF7AImAYYAAAEHAU7+tQAAAAu2ARETGw8AJQErNQAAAP///+D/7AKgBWoCJgGGAAABBwFN/rUAAAALtgEPExIPACUBKzUAAAD////A/+wCoAY5AiYBhgAAAQcHwv7EAAAAEEAJAwIBEhkqDwAlASs1NTUAAP///8b/7AKgBjkCJgGGAAABBwfD/soAAAAQQAkDAgEYGSoPACUBKzU1NQAA////m//sAqAF4QImAYYAAAEHAVL+mQAAAAu2ARIZJQ8AJQErNQAAAP///6X/7AKgBuECJgGGAAABBwea/rMAAAAQQAkDAgERGSUPACUBKzU1NQAA//8AHgAAApsHPgImACwAAAEHAU7+/wFSABNACwEMBSYBAg8XBgslASs1ACs1AAAA//8ALAAAAowGvAImACwAAAEHAU3/AQFSAB1AFAF/D48Pnw+vDwQPBSYBAg8OBgslASs1ACtdNQD////NAAADLwW4ACcALADNAAABBwfE/jr/mwAUswEQAwG4/3O0EREFBSUBKzUAPzX////NAAADVAW4ACcALADyAAABBwfP/eT/mwAUswEOAwG4/6q0Dw8GBiUBKzUAPzUAAgErBMUDRAYxAAcAFgA0QB4LEQADDggRBRcYCxQEDm8E3wQCBICgAQEPAV8BAgEAL11dGs1dxhDEMhESARc5ETMxMAEjJic1MxYXJRQGIxQXFSYmNTQ2MzIWA0RWiEPEHz7+uj4teXdqNzEtPgTZqYYViqHdMi5VCksMem01RC4AAAAAAgFoBMUDlgYxAA4AFgA0QB4DCRMWBgAJBRcYAwwRBm8R3xECEYCgFgEPFl8WAhYAL11dGsxdxhDEMhESARc5ETMxMAEUBiMUFxUmJjU0NjMyFhM2NzMVBgcjAjs9LXl4ajcyLT06RBjFUHtWBc8yLlUKSwx6bTVELv7vwWoVlpkAAAAAAgDyBLADzQbhABcAIwBGQC4aIBUdGCAJBSQlFAW/DM8M3wwDDEAJDUgMEQkMAwAigA8dLx1fHX8dzx3vHQYdAC9dGtzGFzIvK10zMxESARc5ETMxMAEiLgIjIgYHIzY2MzIeAjMyNjczBgYHFCMUFxUmJjU0MzIC+CtTTkkiMjEOXgxqYS1VTkcgLzIQXA1tlWp5cm9oagX4HyQfNi5sfR8kHzYucXhzVjYFRAleXGoAAAD//wCi/+wEeQXsAiYBkgAAAQYBTisAAA65AAH/+7QYIAQSJQErNQAA//8Aov/sBHkFagImAZIAAAEGAU0tAAAOuQAB//
u0GBcEEiUBKzUAAP//AKL/7AR5BjkCJgGSAAABBgfCJQAAErIDAgG4/+e0Hi8EEiUBKzU1NQAA//8Aov/sBHkGOQImAZIAAAEGB8MlAAASsgMCAbj/57QeLwQSJQErNTU1AAD//wCi/hQEZgYxAiYBjgAAAQcHlgGRAAAAC7YCDycjCwAlASs1AAAA//8Aov4UBGYGMQImAY4AAAEHB9ABXAAAAA65AAL/57QmIwsAJQErNf//AKL/7AR5BeECJgGSAAABBgFS+wAADrkAAf/otB4qBBIlASs1AAD//wCi/+wEeQbhAiYBkgAAAQYHmhQAABKyAwIBuP/mtB4qBBIlASs1NTUAAP//AAAAAASHBz4CJgA8AAABBwFO/+gBUgATQAsBCQUmAQIMFAcCJQErNQArNQAAAP//AAAAAASHBrwCJgA8AAABBwFN/+gBUgAdQBQBfwyPDJ8MrwwEDAUmAQAMCwcCJQErNQArXTUA////zQAABbAFuAAnADwBKQAAAQcHxP46/5sAFLMBDgMBuP9ptA4OBwclASs1AD81////zQAABawFuAAnADwBJQAAAQcHz/3k/5sAFLMBCwMBuP/JtAwMBwclASs1AD81//8AAQAABR8FzAAnADMAsAAAAQcH0P9j/5sAFLMCHwQCuP9dtBMTBwclASs1AD81AAMA/ATjA6IGOQAIABQAHwA7QCEPCRoVFQAECQQgIRcMDB0SbwUBBYDvAQEgAQGAAaABAgEAL11xXRrNXcQyMhEzERIBFzkRMxEzMTABIyYmJzUzFhclNDYzMhYVFAYjIiYlNDMyFhUUBiMiJgK0Vj9xG8UcQP5INigmODgmKDYB6V8lOTIsKjUE8k6uNxR7tEA2Li81NTIyNWQvNS06MgADAPwE4wOiBjkACAAUAB8AO0AhDwkaFQQVCAkEICEXDAwdEm8CAQKA7wgBIAgBgAigCAIIAC9dcV0azF3EMjIRMxESARc5ETMRMzEwATY3MxUGBgcjJzQ2MzIWFRQGIyImJTQzMhYVFAYjIiYB6TojxB1xPVbtNigmODgmKDYB6V8lOTIsKjUFCpaZFDutS1g2Li81NTIyNWQvNS06MgAAAQGTBNkCtAYdAAcAJEAUAwAICW8E3wQCBICgAQEPAV8BAgEAL11dGs1dERIBOTkxMAEjJic1MxYXArRWiEPFGEQE2amGFWrBAAAA//8Ac/49Bc8GHQImAZYAAAAnB8QAzwAAAQcHlwDjAAAAGbkAAv/2tT0wERIlAbj/0bQrKAMgJSs1KzUA//8Ac/49Bc8ESgImAZYAAAEHB5cA4wAAAA65AAH/9rQ1KBESJQErNf//AHP+PQXPBh0CJgGWAAAAJwfPANUAAAEHB5cA4wAAABe5AAL/9kAMPTAREiUBLS8sAyAlKzUrNQAAAP//AHP/7AXPBeECJgGWAAABBwFSAKYAAAALtgEAMT0DICUBKzUAAAD//wBz/j0FzwXhAiYBlgAAACcBUgCmAAABBweXAOMAAAAZuQAC//a1TUAREiUBuP//tDE9AyAlKzUrNQD////N/+wGUgXNACcAMgCPAAABBwfE/jr/mwAUswIdAwK4/+K0GBgGBiUBKzUAPzX////N/+wGBwXNACYAMkQAAQcHz/3k/5sAEkAKAhoDAi0cHAYGJQErNQA/Nf///80AAAaFBc0AJwF2AI8AAAEHB8T+Ov+bABSzASUDAbj/3LQgIA0NJQErNQA/Nf///80AAAY6Bc0AJgF2RAABBwfP/eT/mwASQAoBIgMBJyQkDQ0lASs1AD81//8ATv/sCOIFzQAmAXYAAAEHAYYGQgAAAA65AAH/4LQvIBMxJQErNQABAekE2QMKBh0ABwAkQBQHBAgJbwLfAgICgKAHAQ8HXwcCBwAvXV0azF0REgE5OTEwATY3MxUGByMB6T4fxEOIVgTyoYoVhqkAAAAAAQCeBMUBfwYxAA4AOkAWAwkGAAkDDxADEAwgDDAMA7AMwAwCDLj/wEAMHyJIDA8GXwb/BgMGAC9dxCtdcTIREgEXOREzMTABFAYjFBcVJiY1NDYzMhYBcT4teXhpNzEtPgXPMi5VCksMem01RC4AAAH/1f7wACsFBgADAAixAgMALy8xMBMRIxErVgUG+eoGFgAAAAH/If7wAN8FhQAOABVACwsIAg4HCQMNCAUBAC8ZLxczMTATIxEHJzcnNxc3FwcXBycrVn81qKg1qqo1qKg1f/7wBVh/N6imN6qqN6aoN38AAf/X/vABsgWFAAoAErYBAAQKBAcGAC8vMzMSOTIxMAEHJzcjESMRISc3AbLdN4P4UgFKgzcEqts1ffpvBeF9NwAAAAAB/kz+8AApBYUACgAStgUGAwcDCQEALy8zMxI5MjEwEyMRIxcHJzcXByEpVPiFN9/fN4UBTP7wBZF9NdvbN30AAQBSApEEQgMnAAMAEbUAAwQFAAEALzMREgE5OTEwEzUhFVID8AKRlpYAAP//AQn+EgNgBhQAJwBf/yAAAAAHAF8A4QAAAAAAAgAZA8ECxwW2AAcADwAaQAwCBgoOBBARAwsHDwMAPzPNMhESARc5MTABFhMjJgInNyMWEyMmAic3AmAkQ4UtahwPuSRDgzZlFQwFtuz+914BFG0W7P73cgEUWRYAAAAAAf/X/vABsgTRAAUACrICAAMALzMvMTATESMRIRUrVAHbBIH6bwXhUAAAAAAB/k7+8AApBNEABQAKsgQAAQAvMy8xMAE1IREjEf5OAdtUBIFQ+h8FkQAAAAH/Ev7wAO4FgwAHABdADAEABRAFIAUDBQYDBgAvLxDNXTIxMBMjESMRIxEh7sNWwwHcA9H7HwThAbIAAAH/Ev7wAO4FgwALABtADgoBAAUQBSAFAwUDBQkGAC8zMy8vXTMzMTATIxEjESMRIRUhESHuw1bDAdz+dAGMA9H7HwThAbJQ/uwAAAAB/xL+8ADuBYMACwAbQA4ABwALEAsgCwMLBAkDBAAvMy8Qxl0yMjEwAyERITUhESMRIxEj7gGM/nQB3MNWwwQfARRQ/k77HwThAAD//wCFA6YENgW2ACYABQAAAAcACgLuAAD////6BhQEBgacAgYAcQAAAAQAk//jAZEFzQALABcAIwAvAMVAMgwYJAMAEh4qAwYABjAxKhUBAw8VHxUCEgUVIAsOSA8VfVkPDwknJRsBAwAbEBsCEgUbuP/gQD8LDkghG31ZUCFgIQJgIcAhAg8hHyFPIQMMISEJJyotAQMPLR8tAhIFLSALDkgnLX1ZJwQlAwEDAAMQAwISBQO4/+BACQsOSAkDfVkJEwA/KwArX15dX10YPysAK19eXV9dERI5GC9eXV1xKwArX15dX10REjkYLysAK19eXV9dERIBOTkRFzMRFzMxMDc0NjMyFhUUBiMiJhE0NjMyFhUUBiMiJhE0NjMyFhUUBiMiJhE0NjMyFhUUBiMiJpNAPz1CRDs9QkA/PUJEOz1CQT49QkQ7PUJAPz1CRDs9Qm9CSUhDQ0lKA3lCSUhDQklK/qVESEhEQklKA3lCSUhDQ
0lKAAAAAAH/Ev7wAO4FgwAPACFAEQgAAAAPEA8gDwMPBA0HAwMEAC8zETMvEMZdMhEzMTADMxEjNSEVIxEzFSMRIxEj7sPDAdzDw8NWwwQfARRQUP7sTvsfBOEAAAAC/xL+8ADuBYMAAwALABtADgAIAAQQBCAEAwQFCgMFAC8zLxDNXTIyMTADIREhAxEhESMRIxGeATz+xFAB3MNWBB8BFP6eAbL+TvsfBOEAAAH/EP7wAPAFgwAFABVACgEFAjACQAICAgMALzNdETMvMTATIxEDIQMrVsUB4MX+8AUkAW/+kQAAAf8Q/vAA8AWJAAYAH0ARAK8DAc8DAQMFApAFAcAFAQUAL11xLxDNXXEyMTATESMRIxMTK1bF8PAD0fsfBOEBuP5IAAAC/xD+8ADwBYUABgAKAB5ADgMJBwUEBAoGAgIEAQgEAC8zLxEzETMzEhc5MTATIxEnNxcHNycHFytWxfDwxVR/f3/+8AUEttvbtrZxcXEAAAH/Ev7wAO4FgwANAB1AEAkABgMADRANIA0DDQQLAwQALzMvEMZdFzIxMAMzESM1IREzFSMRIxEj7sPDARnDw1bDBB8BFFD+nE77HwThAAAAAgAnAjkCpAXHAAsAFQAgQA4AEQwGEQYWFwkTHwMOIQA/Mz8zERIBOTkRMxEzMTATFBYzMjY1NCYjIgYFECEiJjUQITIWvE9ZWlJSWllPAej+wJ6fAT2foQQApKKhp6WhoaX+N+zdAcXoAAAAAgApAjkCqAXHABYAIgAyQBkFGgoAERoAGiMkHQAOEA4CDg4UBwIfFxQhAD8zPzMSOS9dMxESATk5ETMRMxEzMTATECEyFxUmIyIGBzM2NjMyFhUUBiMiJgUyNjU0JiMiBhUUFikBukoxN0yMlAsIHG9Ve5WnjpuvAUZOYFVTT3BpA8MCBA95E5ajKzuTf46m015cXk5YVTxadQAAAgAjAjkCogXJABUAIQA0QBsFGRkQAAoKHxADIiMcDw0fDQINDQMWEx8IAyEAPzM/MxI5L10zERIBFzkRMxEzETMxMAEQBiMiJzUWMyATIwYjIiY1NDYzMhYlIgYVFBYzMjY1NCYCot7WUDEsXQESFQtHjoOXrImZsf62TlxSVFRsZQRE/vT/D3sVAT5lk4KHps1YYFJOXFY5YG3//wBa/1UC7AJuAQcFywAA/K0ACbMBAARRAD81NQAAAP//AGj/VQMbAnABBwXRAAD8rQAJswEAAFEAPzU1AAAA//8AaP9VA1ACcAEHBdoAAPytAAmzAQADUQA/NTUAAAAAAQAn/2ADIwJiAAsANEAZAwkJBgELBgAFBwcACwMMDQkDCwQBUggLUAA/Mz8zEjk5ERIBFzkRMxEzETMRMxEzMTAlATMTEzMBASMDAyMBRv7vu7a2uP7wAR+5xMW65wF7/voBBv6F/nkBFf7rAAAA//8AYv9VAxQCcAEHBdIAAPytAAmzAQAGUQA/NTUAAAAAAQBKAAAERgXLAB8AjEBSCx4RBhoeHhUBGAAAHAEGBCAhAx0UGhodbVkqGjoaAgkaAQga+BoCEoAaAeAa8BoCDBoBFgMaGgEWFhltWQAWAQ8DFhYBCQkObVkJBAEebVkBEgA/KwAYPysREgA5GC9fXl0rERIAORgvX15dXXFeXXFxKxEAMxEzERIBFzkRMxEzMxEzETMRMzEwISERIyICNTQSMzIXByYjIgYVFBYzMxEhFSERIRUhESEERv3nJ93f58mffkJvZnqKjIgdAhn+jwFW/qoBcQHPAQLy7wEZQIM6z7K0twGkh/7jh/66AAAAAwBk/4kEaAYSACIAKgAwAIRASQABHB0NDxAuJhQrCgMrFyYtDRAHKAEhAwYGISkoEBItFRcJMTIUDi0oBw0Ha1kQFQ4DDQYuIx8ja1kiTxtfGwKfGwEbGgEbAx8ALxczL11xMysRADMzGC8XMysRADMzGC8zERIBFzkRMxEzETMRMxEzETMRMxEzETMRMxEzMxEzMTABBxYXByYnAzI3FQYGIwcjNyYnByMTJhE0Ejc3Mwc3MzIXNwMiBwMWFxMmARQXEwYGBAopRkFHOyXHhbBRmWolgSdZRy+BPOT04SeBIxcZSC4lly0nv0RYyyn9/FqheIMGEsgXH5McDfwtOZchG7S7DB/mASWpAX78AVM0uqoCCLD+vwb8UiwQA+oG/gLNgQMdOe4AAAAAAQBz/+wETAXLACYATUAoHxMlCQkiGAINIhMFJyglCSMAAAVrWQAAEBYWHGtZFgQiCRAJa1kQEwA/KxEAMxg/KxESADkYLysAGBDEEjkREgEXOREzETMRMzEwATIXByYjIgYVETY2NxUGBiMgABEQACEyFwcmJiMiAhEUEhcRMxc2A747PhRCK11+So5fVpli/uD+0gFAARrZpko7lmLB54x+kgpfA4sMmhGSbP6NARgglyMYAYUBbAFcAZJWlB8x/r3+7d7+0jMC4XF/AAAAAAEArv8fBtUFVAAnAF9ANAABEyAcHB0PFBABJgcICCYQHQQoKSAmARQRBR0kHg8LGCQYXVlAJwEnJwMkEBMIEBMDHRUAPxczLz8zMy9dKxEAMxg/ERIXORESARc5ETMRMxEzMxEzETMzETMxMAEDNjMyFhURIxEQIyIGFREjEQEjATU0JiMiBhURIxEzFzM2NjMyFxMFNYNSXrq5st+ZkLP+6o8BpW10mI20kRsKL6tq7FW9BVT+6SHA0/01AsMBBLK3/aIBdf2qA4sZgoK61P3HBEqWUFqbAZEAAAAFABQAAAR/BbYAHQAhACUAKgAvAKxAZy0gKSQqKAIcHBoiKCYDGRkEABofJCAjDREVCiEsFSsPExMrLCMkGgYwMSIgERQtGAYcHR0cbVkAHRAdAgkDHQMoCQ0QHyUGAgMDAm1ZHwMBLwOvA78D3wP/AwUDAyovAxoLBQMWGhIAPzM/MxIXOS9dcSsREgAXORgQxl9eXSsREgAXORESARc5ETMRMxEzMxEzMxEzETMRMzMzERczETMRMxEzETMRMzEwEzUjNTMRMxYWEzMRMxEzFSMVMxUjESMDIxEjESM1JSMXMyEzJyMDFhczJwEnIxYXtKCg1Qk0hvGioKCgoNfC8qCgAynCPYX+F8A9gxAJA1ZaAgANVkcUApGogwH6D4b+mwH6/gaDqIP98gIO/fICDoOoqKgBe5Nl+Px7378gAAMAqv/sBlgFtgANABYAPAB0QD0vFwsHChIcKQ4BAQIHEjYpFyIiKRICBD0+JSIpORc2LDNeWSwQCgAOAGtZDg4DDAISAxZrWQMDGiBeWRoVAD8rABg/KwAYPzMSOS8rEQAzGD8rERIAORESORESARc5ETMRMxEzETMRMxEzETMRMxEzMTABESMRITIWFRQGBwEjASczMjY1NCYjIwEUBiMiJzUWFjMyNTQmJy4CNTQ2MzIWFwcmJiMiBhUUFhceAgFYrgEr4NhuawEhw/8AuoeHc3yCgwUAta2oXjWYObRHb2xeLrKUV4I7QjllOUdOSXFqXi4CXP2kBbbP0I/HMv1xAlyc
jIqPfvwSm6ZBpiUxoj1SQDxabkmEnywiiSAkSTw7UkA7Wm0ABwAUAAAFSAW2AB8AIwAnACsAMAA1ADsAy0B/DhUCBR4eBAcBKAA6HRo5ICkjKi4ICy8nIiQhNBkTJQ8MFRYREhIWDCYlMxkhIi8IKik5HQABBBI8PSggJBMWNBo6CB4fHx5tWQAfEB8CCQMfAwcuCw8SJyMrCAIDAwJtWR8DAS8DrwO/A98D7wP/AwYDLDYxAwQcDQkFAxgcEgA/Mz8zMxIXOS9dcSsREgAXORgQxl9eXSsREgAXORESARc5ETMRMxEzETMRMxEzETMRMxEzETMRMxEzETMRMxEzETMRMzMRMzEwEycjNTMDMxMzEzMTMxMzAzMVIwczFSMDIwMjAyMDIzUhMycjBTM3IwUzNyMBBgczJgE2NyMWBTY2NyMWsB1/aVikUNtevGHbTKFUZXsblqxWuWL0XLZctQI8wh6HATt3GLD9w3IfrAGDDSJYHwEtBRxJIv2iARIOSCACkaiDAfr+BgH6/gYB+v4Gg6iD/fICDv3yAg6DqKioqKgBvYO3ovx3Y9bbVh67WLsAAAEAHwAABJMFtgAVAExAKhQEBBcRCQkOCgIDFQoMBRYXAwcMDQxtWRURAAMvDc8NAg0NChMPAwUKEgA/Mz8zEjkvXRczKxEAMzMREgEXOREzMxEzETMRMzEwASEVIQEjASMHESMRIzUzETMRMwEzAQKBAZr+vAG8zv5WKXOusrKuNQIAyf3+AzOH/VQCrH/90wKshwKD/X0Cg/2HAAEAJwAABG8FtgAXAHFAQBAMAAQICBENCQIGFgYJDBMFGBkMCg0EBwYGBQsQDhEDAAIGAQ8LBQAFEAUCCQMPAUAFAQUBFAkSFxMUE2lZFAMAPysRADMYPxI5OS8vGhDNX15dEM0REhc5ERIXORESARc5ETMRMzMzETMzETMxMAE3FwUVNxcFESMRByclNQcnJREhNSEVIQKo8En+x/BJ/se48EoBOvBKATr+NwRI/jkDxaVs18+mbdf+RgFQpG3XzKNs1wG4oqIAAwAx/hQHeQXLABIAHgA5AF9ANiMwABwcFwwwKisoNwg6OykpKB8QE2xZDxAfEAITAxAQKB8MGx8za1kfBCgra1koEgQZbFkEEwA/KwAYPysAGD8rABg/ERI5L19eXSsREgA5GC8REgEXOREzETMxMAEUBgYjIiYnIwYGAyMTNjYzMhYlIgYHBxYzMjY1NCYBMgQSFRQCBCMjEzMDMzIkEjU0JCMiBgcnNjYHeWW5fDVlGggQFUisqCXJmZmn/sBTahoWKG9jg0r7VMoBNKbM/ovu6vyw2xPCASGZ/vniVsZKPkzkAYFvumwpHF5+/r8DJbO8mwxodGpYknJIUgOymv7hvvv+etMEoPv8rgFA0Nb/JiKSJS0AAAIAGf4UBCEFzQAiACsAWkAvCxoZFScdAxoABgYjEREaHRUXBSwtEyoUIBUYGBcgGRQTFxIgJWxZIAQJDmxZCRsAPysAGD8rABg/PzMREjkvMhESOTkREgEXOREzMxEzETMRMxEzETMxMAEUAgcSEhUUBiMiJzUWMzI2NTQLAgcjARMTJgI1NDYzMhYHNCMiFRQSFzYEHXZsg2OjkEhSUkg9UKjqqn6oASakrlBVloaBlKZxdTsufQSkhP6zrP7U/sFqlKoVmiFZV6UBiv5mARP6Ah/+1QEtpQE5eKSynZ2sxFL+/V/0AAAEABQAAAR/BbYAGgAhACYALADLQH8qIwYAEw8fIiwDDAwVEQ0BBAQbABkZGx0gIw0PBy0uAR4TFBNuWRkiIBQBAlAUAYAUkBQCABQQFJAUoBSwFAUJAxQQBysPEA9uWQQfDxAfEC8QAwkDEBALFgssa1k/CwFPC18Lrwu/C88LBQALEAvQCwMMAwsLFg0SFiZrWRYDAD8rABg/EjkvX15dXXErERIAORgvX15dMzMrEQAzMxgQxl9eXXFyX10yMisRADMzERIBFzkRMxEzETMRMzMzERczETMRMxEzMTABIxcUBzMVIwYEIyMRIxEjNTM1IzUzESEgEzMFNCchFSE2JSEmIyMTMjY3IRUEf4UECImmN/8Av3Kqs7OzswE/AYpVmv7LBv4tAdEI/icBsErXj2h5nyr+VgQlSidFb5GS/iMDAG+2bgEj/t3AKSm2PeeM/VZBSosAAAMAff9cBTsGEgAVABwAIQBpQDkMEhYEEB4KAxUVBxkAEiAgAAQDIiMQHWlZEBAUCghACQ5ICAgHChoPCg9pWQoEGR4BFBQea1kAFBMAP80rEQAzETMYPysRADMRMzMYLysREjkvKxESARc5ETMRMzMzERczETMRMzEwBTUkABEQACU1MxUWFwcmJxEhEQYHFQEQEhcRBgIBETY3EQL8/s7+swFSAS2D38lGtqwBvM7u/b7j3NTrAkKOdqSSEgGIAVMBPwGHIk9HBFKgTwX92/0zQwaSA3/++v7XGwSQIP7N/pL+KwMgAbIAAAAAAwAAAAAE2wW2ABcAGwAiAHtAQw0IBhMPDw4LERsVABoCBgYEBAoZGh0VEhEOCSMkIA0WAxsTFBNtWQAcDxQfFAIJAxQQBwsPEA9tWQQYEBANFgMJDRIAPzM/EjkvMzMrEQAzMxgQxl9eXTIyKxEAMzMREjkREgEXOREzETMRMxEzETMRMxEzETMyMTABIRUhFzMVIxMjAyEDIxMjNTM3ITUhEzMBISchNzMnJicGBgORATj++D3LnK7Cqv37p8Oumsk8/vsBNMTB/s0BpTv+zy3XAkkiEFADk4Oog/4bAeX+GwHlg6iDAiP8sqiDB8+FT/UAAAEAFP/sBFAFywAwAIdATw4uFx0dJhUELy8sIC4VCAgbAy4sBTEyGAQFBG1ZFQ8FHwUCCQMFMB4vMC9tWRsPMB8wPzBPMG8w3zDvMAcQAzAwKRAQC2lZEAQpI2lZKRMAPysAGD8rERIAORgvX15dMysRADMYEMZfXl0yKxEAMxESARc5ETMRMxEzETMRMzMRMxEzMTATNjY3ITUhNjU0JiMiByc2MzIEFRQHMxUjBgcHIRUhBhUUFjMyNxUGBiMiJDU0NyM11zmbev3vAvo2k4KTqDqvwtEBAhlrwVqwMwH+/Qg/paS64EXSe/X+6xdjApEvTC2DOV9lcE6eUsqrWEKDVj8Tgzlebn5hsSIt3L9GQYMAAAIAff9cBM8GEgAWAB0ATkAqFwQKEBYWBxoADBMABAQeHxsPCg9pWQlACQ5ICQkHCgQaEBUQaVkBABUTAD/NMysRADMYPzMzLysrEQAzERIBFzkRMzMzETMzETMxMAU1JAAREAAlNTMVFhcHJicRNjcVBgcVARQSFxEGAgLd/tr+xgFCAR6D1JtKm4qJrZSi/d/VycLcpJITAYcBVQFAAYsdTUcKTJxGCPtmBTWgNgOSA3///tIbBJAe/ssAAQCiAAAD8gW2ABkAeEBCEQ4JGAUZGRATCw4OARgIDQ0YEwMaGw4ZABltWQsAAAYTEBQUEG1ZrxS/FAIAFBAUwBQDCQMUFAYSEgk
FBgVtWQYDAD8rEQAzGD8SOS9fXl1dKxESADkSORgvMysRADMREgEXOREzETMzETMRMzMRMxEzETMxMBMhJiYjIzUhFSEWFyEVIQIFASMBNTMyNjchogGLFq2bLQNQ/n1iEgEP/vUd/qUBh9H+fS2upQ3+cwRacmeDg06Lg/7fMf17AqRaZHUAAP///iIDYAHeBvkABwAN/c4A5QAAAAQAZP/sBkQFywAHABIAIgAyAF1ANyMTAAkJCg8DGysrAwoTBDM0AAAIAQgICw8KHwp/Co8KBAoKFwcACxALcAuACwQLCx8vFwQnHxMAPzM/MxI5L10zETkvXRI5L3EzERIBFzkRMxEzETMRMxEzMTABMzI1NCYjIxERIxEhMhYVFAYjJTQSJDMyBBIVFAIEIyIkAjcUEgQzMiQSNTQCJCMiBAIC5ZCqU1mOmwEvqJuphvzXyAFeysgBXsrC/qLQz/6iw22sASusrAEqraz+1ays/tatAtuiUUn+Rf6/A3+NjIKjf8gBXsrI/qLKxf6m0M8BWsas/tatrAErrKwBKq2s/tUABAAK//gFlgW2AAcADAAyADYAdUBFJg0TIAo1LSANGRkzIAcINQwECDc4NgM1EgKQDAFFDAELDAEMDAUAEARwBAIEBAoFAzANLR0ZICogI1AjAkAjASMjFxASAD8zMy9dcTMREjkREjk/MzMvXTMSOS9dXV0zPz8REgEXOREzETMRMxEzETMxMAEnIwcjATMBAycnBwcBFAYjIiYnNRYWMzI1NCYmJyYmNTQ2MzIWFwcmJiMiBhUUFhcWFgMBIwECH0j4SYwBEYEBEPhFFRJGBJSjiT50JSeCM5sbPz9pXIN3O3krIyZpLTk2OFxxWpb83ZUDIgL2yMgCwP1AATPDQ0bA/JNbaRUSfRQeTiIhIxUkX1BbaBsUbREYHykrLRwmYQSj+koFtgABAD0AAALPBEoACQAuQBgCCQUFBwMDCgsIB11ZCAgDAA8DBF1ZAxUAPysAGD8SOS8rERIBFzkRMzMxMAEzESE1IREhNSECGbb9bgHc/kMBvQRK+7aTAV+TAAAA//8ALv/wBhgFtgAnAhcCYAAAACYAe+IAAQcAdQOH/bcAB7ICFhIAPzUAAAD//wAx//AGLQXJACcCFwKeAAAAJgB0AAABBwB1A5z9twAHsgIkEgA/NQAAAAABAEb/8gO0BFgAFgAmQBQIFBQOAwMXGAUAXVkFEAsRXVkLFgA/KwAYPysREgEXOREzMTABIgcnNjMyABEQACMiJic1FjMyNjU0JgGRcJZFlLX+ASf+7/1iiUSocq24vgPFQo1I/tT++/7y/tkXGpMx2MjB3wAAAAABAaIAZAZeAkQADQAxQB0LAAkCDQIABQQODwgAAxADcAOAA5ADBQMACAMDCwAvFzMvXS8REgEXOREzETMxMAEWFyMmJzU2NzMGByEVAoE5Pkh/j49/SD45A90BKUSBlkgkSJaBRFYAAAEBEP/DAvAEfwANAB5ADQwNCQ0CAw4PCQIFDQUALy8QxDIREgEXOREzMTABBgc1NjczFhcVJicRIwHVRIGWSCRIloFEVgOgOj1If4+Pf0g9OvwjAAEBogBkBl4CRAANADFAHQIJAAsGCwkMBA4PAAkQCXAJgAmQCQUJAgwCCQMNAC8XMy8vXRESARc5ETMRMzEwASYnMxYXFQYHIzY3ITUFfzk+SH+Pj39IPjn8IwF/RIGWSCRIloFEVgAAAQEQ/8MC8AR/AA0AHEAMAAsDCwgDDg8CCQYMAC8vxDIREgEXOREzMTAlNjcVBgcjJic1FhcRMwIrRIGWSCRIloFEVqI5Pkh/j49/SD45A90AAAAAAQGiAGQGXgJEABcAP0AlCwAJAg4VDBcSFxUCAAUGGBkVAAMQA3ADgAOQAwUDDggACAMDCwAvFzMvMy9dMxESARc5ETMRMxEzETMxMAEWFyMmJzU2NzMGByEmJzMWFxUGByM2NwKBOT5If4+Pf0g+OQL+OT5If4+Pf0g+OQEpRIGWSCRIloFERIGWSCRIloFEAAAAAAEBEP/DAvAEfwAXAChAEgIUDBcJDw8XFAMYGQ4VEgkCBQAvxDIvxDIREgEXOREzETMRMzEwAQYHNTY3MxYXFSYnETY3FQYHIyYnNRYXAdVEgZZIJEiWgUREgZZIJEiWgUQDoDo9SH+Pj39IPTr9Ajk+SH+Pj39IPjkAAAACARD/SALwBH8AAwAbADBAFhgGAxAbEw0CAhsDAxwdAwASGRYNBgkAL8QyL8QyzjIREgEXOREzMxEzETMzMTAFIRUhEwYHNTY3MxYXFSYnETY3FQYHIyYnNRYXARAB4P4gxUSBlkgkSJaBRESBlkgkSJaBRGhQBFg6PUh/j49/SD06/QI5Pkh/j49/SD45AP///nkAAAKPBbYCBgIXAAD//wCTAkgBkQNeAgYAeQAAAAEBmAAABmAExwAFABhACQIFBQQGBwIFAAAvLzMREgE5OREzMTABMxEhFSEBmF4Eavs4BMf7l14AAQEX//4EqgQIABMAHkAMEwAKCwALFBULAA8FAC8zLzIREgE5OREzETMxMAURNDY2MzIWFhURIxE0JiMiBhURARdy0YOD03dmxaCiwAICAJXwhYXyk/4AAgK+5OHD/gAAAwBkAPQESARQAAMABwALAEBAJggABAsDBwQHDA0EUAUBBQBfAQEBCAUBAw8JLwk/CW8J3wnvCQYJAC9dFzMvXTMvXTMREgE5OREzMxEzMzEwEzUhFQE1IRUBNSEVZAPk/BwD5PwcA+QDvJSU/TiTkwFklJQAAAAAAgCeAAAENwSBAAQACQAeQAwFAAQGAAYKCwUACAIALzMvMhESATk5ETMRMzEwMxEBARElIREBAZ4BzAHN/LcC+f6D/oQCewIG/fr9hVICBgGq/lb//wBqAQYELQMbAEcAbgSTAADAAEAAAAAAAQIj/hQD0waqABUAHEALAAEBCBYXCwUBEQUALzMvEM0REgE5OREzMTABIxE0NjMyFhUUBiMiJyYnJiMiBwYVArSRqH0/TDMlHwwRJiERIgsG/hQG3MT2QC8pMwoJKScnI2kAAAEBBP4UArQGqgAUABpACgIUCBQVFgsRBQAALy8zzRESATk5ETMxMAEzERQGIyImNTQ2MzIXFhcWMzI2NQIjkaKFOVAzIyMZCh4fERwZBqr5I8P2Pi8nNRAEKSUzfwAAAAH/9gKmBbQDNwADABG1AwUABAABAC8zEQEzETMxMAM1IRUKBb4CppGRAAAAAQHX/hQCaAfJAAMAE7YCAwMEBQMAAC8vERIBOREzMTABMxEjAdeRkQfJ9ksAAAAAAQKN/hQFtAM3AAUAGkAKAgcEBQUGBwUDAAAvMi8REgE5ETMRMzEwASEVIREjAo0DJ/1rkgM3kftuAAAAAf/2/hQDHwM3AAUAGEAJAAMEBAYHBAABAC8zLxESATkRMzIxMAM1IREjEQoDKZICppH63QSSAAABAo0CpgW0B8kABQAaQAoEBwIFBQYHBQIAAC8vMxESATkRMxEzMTABMxEhFS
ECjZIClfzZB8n7bpEAAAAB//YCpgMfB8kABQAYQAkABQICBgcAAQMALy8zERIBOREzMjEwAzUhETMRCgKXkgKmkQSS+t0AAAECjf4UBbQHyQAHACBADQQJAgYGBwcICQUCBwAALy8vMxESATkRMxEzETMxMAEzESEVIREjAo2SApX9a5IHyftukftuAAAAAAH/9v4UAx8HyQAHABxACwAFAgYGCAkAAQYDAC8vLzMREgE5ETMzMjEwAzUhETMRIxEKApeSkgKmkQSS9ksEkgAB//b+FAW0AzcABwAeQAwDCQAFBgYICQYEAAEALzMyLxESATkRMzIRMzEwAzUhFSERIxEKBb79a5ICppGR+24EkgAAAAH/9gKmBbQHyQAHAB5ADAcJAAUCAggJAAUBAwAvLzMzERIBOREzMhEzMTADNSERMxEhFQoCl5IClQKmkQSS+26RAAAAAf/2/hQFtAfJAAsAKEARBw0ABQkJAgoKDA0IAAUBCgMALy8vMzMyERIBOREzMxEzMhEzMTADNSERMxEhFSERIxEKApeSApX9a5ICppEEkvtukftuBJIAAAL/9gHyBbQD7AADAAcANkAdAwcHCQAEBAgEXwUBAwWoAAHIAAEGALABAQ8BAQEAL11dM19dcS9fXTMRATMRMxEzETMxMAM1IRUBNSEVCgW++kIFvgNakpL+mJGRAAAAAAIB2f4UA9MHyQADAAcAHkAMAgMGBwMHCAkHAwQAAC8yLzMREgE5OREzETMxMAEzESMBMxEjAdmRkQFpkZEHyfZLCbX2SwAAAAECjf4UBbQD7AAJAD5AIQIGBgsECAgJCQoLB18EAQMECagDAcgDAQYDsAABDwABAAAvXV0yX11xLy9fXTMREgE5ETMRMxEzETMxMAEhFSEVIRUhESMCjQMn/WsClf1rkgPskteR/CIAAQHZ/hQFtAM3AAkAJkAQAQsHCAMECAQKCwQIAgYGCQAvMxEzLzMREgE5OREzETMRMzEwARUhESMRIxEjEQW0/h+R2JEDN5H7bgSS+24FIwACAdn+FAW0A+wABQALAEJAIwIICA0EBQoLBQsMDQlfBgEDBgsFqAMByAMBBgOwAAEPAAEAAC9dXTJfXXEvMy9fXTMREgE5OREzETMRMxEzMTABIRUhESMBIRUhESMB2QPb/LaRAWkCcv4fkQPskvq6BG+R/CIAAAAB//b+FAMfA+wACQA6QB8EAAAHAggICgsAXwEBAwEIqAQByAQBBgSwBQEPBQEFAC9dXTNfXXEvL19dMxESATkRMzMyETMxMAM1ITUhNSERIxEKApf9aQMpkgHykdeS+igD3gAAAf/2/hQD0wM3AAkAIkAOAAcIAwQIBAoLBAgGAAEALzMyLzMREgE5OREzETMyMTADNSERIxEjESMRCgPdkdiRAqaR+t0EkvtuBJIAAAL/9v4UA9MD7AAFAAsAQEAiBAkJBgcBAgcCDA0JXwoBAwoCB6gEAcgEAQYEsAUBDwUBBQAvXV0zX11xLzMvX10zERIBOTkRMxEzMhEzMTABESMRITUBIxEhNSED05H8tAJ0kf4dAnQD7PooBUaS+igD3pEAAQKNAfIFtAfJAAkAPEAgBAgICwIGBgkJCguoBQHIBQEGBbACAQ8CAQIJXwYBBgAALy9dMy9dXTNfXXEREgE5ETMRMxEzETMxMAEzESEVIRUhFSECjZIClf1rApX82QfJ/COS15EAAAABAdkCpgW0B8kACQAkQA8ECwgFAgkFCQoLAgUIAAYALzMvMzMREgE5OREzETMRMzEwATMRIRUhETMRMwNCkQHh/CWR2AfJ+26RBSP7bgAAAAIB2QHyBbQHyQAFAAsAQEAiCgQEDQIFCAsFCwwNqAsByAsBBguwCAEPCAEIBV8CAQIGAAAvMi9dMy9dXTNfXXEREgE5OREzETMRMxEzMTABMxEhFSEBMxEhFSEB2ZEDSvwlAWmRAeH9jgfJ+rqRBdf8I5IAAf/2AfIDHwfJAAkAOEAeBAAACQYCAgoLqAQByAQBBgSwBQEPBQEFAF8BAQEHAC8vXTMvXV0zX11xERIBOREzMzIRMzEwAzUhNSE1IREzEQoCl/1pApeSAfKR15ID3fopAAAAAAH/9gKmA9MHyQAJACJADgEGAwAHAwcKCwYBAggEAC8zLzMzERIBOTkRMxEzMjEwASE1IREzETMRMwPT/CMB45HYkQKmkQSS+24EkgAC//YB8gPTB8kABQALAD5AIQkBAQgLAAMLAwwNqAkByAkBBgmwCgEPCgEKAV8CAQIEBgAvMy9dMy9dXTNfXXEREgE5OREzETMyETMxMAEhNSERMyEzESE1IQPT/CMDTJH+BpH9jAHjAfKRBUb7kZIAAQKN/hQFtAfJAAsAQkAjBAgIDQIGCgoLCwwNCV8GAQMGqAUByAUBBgWwAgEPAgECCwAALy8vXV0zX11xL19dMxESATkRMxEzMxEzETMxMAEzESEVIRUhFSERIwKNkgKV/WsClf1rkgfJ/COS15H8IgAAAAACAdn+FAW0B8kABwALACpAEgQNCgsCBgYHCwcMDQUCBwsACAAvMy8zLzMREgE5OREzETMRMxEzMTABMxEhFSERIwEzESMDQpEB4f4fkf6XkZEHyftukftuCbX2SwAAAAADAdn+FAW0B8kAAwAJAA8ATEAoDgYGEQABDAgIDwkBCRARB18EAQMEqA8ByA8BBg+wDAEPDAEMCQEKAgAvMy8zL11dM19dcS9fXTMREgE5OREzMxEzETMRMxEzMTABIxEzEyEVIREjETMRIRUhAmqRkdgCcv4fkZEB4f2O/hQJtfq6kfwiCbX8I5IAAAH/9v4UAx8HyQALAD5AIQQAAAkGAgoKDA0AXwEBAwGoBAHIBAEGBLAFAQ8FAQUKBwAvLy9dXTNfXXEvX10zERIBOREzMzMyETMxMAM1ITUhNSERMxEjEQoCl/1pApeSkgHykdeSA932SwPeAAL/9v4UA9MHyQAHAAsAJkAQAAUCBgoLBgsMDQABCwYIAwAvMy8zLzMREgE5OREzETMzMjEwAzUhETMRIxEBMxEjCgHjkZEBaZGRAqaRBJL2SwSSBSP2SwAD//b+FAPTB8kAAwAJAA8ASkAnBw0NBgoKCQsCAwsDEBENXw4BAw6oBwHIBwEGB7AIAQ8IAQgDCwAEAC8zLzMvXV0zX11xL19dMxESATk5ETMRMzMRMzIRMzEwATMRIwEzESE1IRMjESE1IQNCkZH+l5H9jAHjkZH+HQJ0B8n2Swm1+5GS+igD3pEAAAL/9v4UBbQD7AAHAAsAQkAjCwMDDQgAAAUGBgwNBABfAQEDAQaoCAHICAEGCLAJAQ8JAQkAL11dM19dcS8vX10zMhESATkRMzIRMxEzETMxMAM1IRUhESMRATUhFQoFvv1rkv1pBb4B8pGR/CID3gFokpIAAf/2/hQFtAM3AAsAKEARAw0ACQoFBgoGDA0GCgQIAAEALzMyMi8zERIBOTkRMxEzMhEzMTADNSEVIREjESMRIxEKBb7+H5HYkQKmkZH7b
gSS+24EkgAAAAP/9v4UBbQD7AAFAAsADwBOQCkNCAgRDgMDAAEKCwELEBEJAwMGXwQBAwQLAagOAcgOAQYOsA8BDw8BDwAvXV0zX11xLzMvX10zMxEzERIBOTkRMxEzMhEzETMRMzEwASMRITUhMyEVIREjARUhNQJqkf4dAnTYAnL+H5ECcvpC/hQD3pGR/CIF2JKSAAAAAAL/9gHyBbQHyQAHAAsAQEAiBwsLDQAICAUCAgwNqAAByAABBgAFsAEBDwEBAQhfCQEJAwAvL10zL11dMzNfXXEREgE5ETMyETMRMxEzMTADNSERMxEhFQE1IRUKApeSApX6QgW+A1qSA938I5L+mJGRAAAAAf/2AqYFtAfJAAsAKEARCw0ABQIJBgIGDA0JBQABBwMALzMvMzMzERIBOTkRMxEzMhEzMTADNSERMxEzETMRIRUKAeOR2JEB4QKmkQSS+24EkvtukQAAAAP/9gHyBbQHyQAFAAsADwBMQCgEDw8RCQwMCAsCBQsFEBEFCagJAcgJAQYJArAKAQ8KAQoMXw0BDQAGAC8zL10zL11dMzNfXXERMxESATk5ETMRMzIRMxEzETMxMAEzESEVIQEzESE1IQE1IRUDQpEB4f2O/peR/YwB4/4dBb4HyfwjkgRv+5GS/gaRkQAAAAH/9v4UBbQHyQATAFZALQsPDxUEAAAJDRERBgISEhQVEAANXwEBAwEMBKgEAcgEAQYECbAFAQ8FAQUSBwAvLy9dXTMzX11xETMvX10zMzIREgE5ETMzMxEzMzIRMxEzETMxMAM1ITUhNSERMxEhFSEVIRUhESMRCgKX/WkCl5IClf1rApX9a5IB8pHXkgPd/COS15H8IgPeAAAAAAH/9v4UBbQHyQATAD5AHAQVDRIKCg8LAgYGEwcLBxQVBQkNDQISDgcLABAALzMvMy8zMzMRMzMREgE5OREzMxEzETMzETMyETMxMAEzESEVIREjESMRIxEhNSERMxEzA0KRAeH+H5HYkf4dAeOR2AfJ+26R+24EkvtuBJKRBJL7bgAAAAT/9v4UBbQHyQAFAAsAEQAXAGRANAQODhkVCQkUBgYXBwIQEAURBxEYGQ8JCQxfCgEDCgUVqBUByBUBBhUCsBYBDxYBFhEHABIALzMvMy9dXTMzX11xETMvX10zMxEzERIBOTkRMzMRMxEzMxEzMhEzETMRMzEwATMRIRUhAyMRITUhMyEVIREjATMRITUhA0KRAeH9jtiR/h0CdNgCcv4fkf6Xkf2MAeMHyfwjkvq6A96RkfwiCbX7kZIAAQAAAu4FqgfJAAMAEbUABQEEAQIALy8RATMRMzEwASERIQWq+lYFqgLuBNsAAAAAAQAA/hQFqgLuAAMAEbUABQEEAQIALy8RATMRMzEwASERIQWq+lYFqv4UBNoAAAAAAQAA/hQFqgfJAAMAEbUABQEEAQIALy8RATMRMzEwASERIQWq+lYFqv4UCbUAAAAAAQAA/hQC1QfJAAMAEbUBAAQFAQIALy8REgE5MjEwASERIQLV/SsC1f4UCbUAAAAAAQLV/hQFqgfJAAMAEbUAAQQFAQIALy8REgE5MzEwASERIQWq/SsC1f4UCbUAAAAAKgBm/ncFqgclAAMABwALAA8AEwAXABsAHwAjACcAKwAvADMANwA7AD8AQwBHAEsATwBTAFcAWwBfAGMAZwBrAG8AcwB3AHsAfwCDAIcAiwCPAJMAlwCbAJ8AowCnAZFA9QIiMkqGBWpqAyMzS4cFaw4uRlZ6BW5uDy9HV3sFbwYeNk6KBWZmBx83T4sFZxIqQlp+BXJyEytDW38FcwoaOlKOBWJiCxs7U48FYxYmPl6CBXZ2Fyc/X4MFd5KWmp6mBaKik5ebn6cFo6N3Y3Nnb2sHqKljZ6MDa2tgZKADaF9bV1dcWFRPU58DS0tMUJwDSEM/R0dAPEQ3O5sDMzM0OJgDMCsnLy8oJCwbH5cDIyMYHJQDIBcTDw8UEAwHC5MDAwMECJADAIN/e3uAfHhoVEhEMCwgDAB4eAAMICwwREhUaAqEdHBsbHdzb4uPpwOHh4iMpAOEAC8XMzMRFzMvMzMzETMzEhc5Ly8vLy8vLy8vLxEzMzMRMzMRFzMzERczETMzMxEzMxEXMzMRFzMRMzMzETMzERczMxEXMxEzMzMRMzMRFzMzERczETMzMxEzMxEXMzMRFzMREgEXOREXMzMRFzMRFzMzERczERczMxEXMxEXMzMRFzMRFzMzERczERczMxEXMxEXMzMRFzMxMBMzFSMlMxUjJTMVIwUzFSMlMxUjJTMVIwczFSMlMxUjJTMVIwUzFSMlMxUjJTMVIwczFSMlMxUjJTMVIxczFSMlMxUjJTMVIwczFSMlMxUjJTMVIwUzFSMlMxUjJTMVIwczFSMlMxUjJTMVIxczFSMlMxUjJTMVIwEzFSMlMxUjJTMVIwEzFSMlMxUjJTMVIwEzFSMRMxUjETMVIxEzFSMRMxUjETMVI2ZpaQGeaWkBomZm/Y9paQGgaGgBoGZmz2Zm/l5paf5iaWkED2Zm/mBoaP5gaWnPaWkBnmlpAaJmZs9mZv5gaGj+YGlpz2lpAZ5paQGiZmb9j2lpAaBoaAGgZmbPZmb+Xmlp/mJpac9paQGgaGgBoGZm/MBpaQGgaGgBoGZm+/FpaQGeaWkBomZmAZ5mZmZmZmZmZmZmZmYFpGJiYmJiY15eXl5eYGBgYGBgZV5eXl5eYGFhYWFhZF5eXl5eYGNjY2NjYlxcXFxcYmNjY2NjXmBgYGBgB+tiYmJiYgElYGBgYGD+32L+32D+3WH+3mP+4GMH8GAAAAAAVAAA/ncFqgclAAMABwALAA8AEwAXABsAHwAjACcAKwAvADMANwA7AD8AQwBHAEsATwBTAFcAWwBfAGMAZwBrAG8AcwB3AHsAfwCDAIcAiwCPAJMAlwCbAJ8AowCnAKsArwCzALcAuwC/AMMAxwDLAM8A0wDXANsA3wDjAOcA6wDvAPMA9wD7AP8BAwEHAQsBDwETARcBGwEfASMBJwErAS8BMwE3ATsBPwFDAUcBSwFPA0tAFBpKeqryBdraG0t7q/MF29sCMmKmuAEKtgXW1gMzY6e4AQtAFQXXHk6OrvYF3t4fT4+v9wXfBjZmorgBDrYF0tIHN2ejuAEPQBUF0yJSfrL6BeLiI1N/s/sF4wo6ap64ARK2Bc7OCztrn7gBE0AVBc8mVoK2/gXm5idXg7f/BecOPm6auAEWtgXKyg8/b5u4ARe1BcsqWoa6uAECtgXq6itbh7u4AQO1BesSQnKWuAEatgXGxhNDc5e4ARu1BccuXoq+uAEGtgXu7i9fi7+4AQe1Be8WRnaSuAEetgXCwhdHd5NBIwEfAAUAwwEmAS4BNgE+AUoABQFGAUYBJwEvATcBPwFLAAUBRwEiASoBMgE6AU4ABQFCAUIBIwErATMBOwFPAAUBQwFDAUdADMPvx+vL58/j09/XDbkBUAFRtMPHy8/TuAFDtwbX18DEyMzQuAFAtgbUr7O3u7+4AT+3BqurrLC0
uLy4ATy2BqiTl5ufo7gBO7cGp6eQlJicoLgBOLYGpH+Dh4uPuAE3twZ7e3yAhIiMuAE0tgZ4Z2tvc3e4ATO3BmNjZGhscHS4ATC2BmBPU1dbX7gBL7cGS0tMUFRYXLgBLLYGSDc7P0NHuAErtwYzMzQ4PEBEuAEotgYwHyMnKy+4ASe3BhsbHCAkKCy4ASS2BhgHCw8TF7gBI7cGAwMECAwQFLgBILQGAPf7/7oBAwEHAUu1BvPz9Pj8ugEAAQQBSEAXBvDUqKR4YEgwGADw8AAYMEhgeKSo1Aq4AQi03ODk6Oy4AUS3BtjY3+Pn6+9BFAFHAAYA2wEPARMBFwEbAR8BTwAGAQsBCwEMARABFAEYARwBTAAGAQgALxczMxEXMy8XMzMRFzMSFzkvLy8vLy8vLy8vERczMxEXMxEXMzMRFzMRFzMzERczERczMxEXMxEXMzMRFzMRFzMzERczERczMxEXMxEXMzMRFzMRFzMzERczERczMxEXMxESARc5ERczMxEXMxEXMzMRFzMRFzMzERczERczMxEXMxEXMzMRFzMRFzMzERczERczMxEXMxEXMzMRFzMRFzMzERczERczMxEXMxEXMzMRFzMRFzMzERczERczMxEXMzIRFzMzERczMTATMxUjNzMVIzczFSM3MxUjNzMVIzczFSMFMxUjNzMVIzczFSM3MxUjNzMVIzczFSMFMxUjNzMVIzczFSM3MxUjNzMVIzczFSMFMxUjNzMVIzczFSM3MxUjNzMVIzczFSMFMxUjNzMVIzczFSM3MxUjNzMVIzczFSMFMxUjJTMVIzczFSM3MxUjNzMVIyUzFSMFMxUjJzMVIyczFSMnMxUjJzMVIyczFSMHMxUjNzMVIzczFSM3MxUjNzMVIzczFSMXMxUjJzMVIyczFSMnMxUjJzMVIyczFSMHMxUjNzMVIzczFSM3MxUjNzMVIzczFSMBMxUjNzMVIzczFSM3MxUjNzMVIzczFSMBMxUjNzMVIzczFSM3MxUjNzMVIzczFSMTMxUjBzMVIxczFSMHMxUjFzMVIwczFSMXMxUjBzMVIxczFSMHMxUjETMVIxMzFSNmaWnPaWnPaWnRaGjRZmbPZmb7i2Zmz2Zmz2Zmz2ho0Glpz2lp/Fppac9pac9padFoaNFmZs9mZvuLZmbPZmbPZmbPaGjQaWnPaWn8Wmlpz2lpz2lp0Who0WZmz2Zm+4tmZgGeZmbPaGjQaWnPaWn8w2ZmA6ZmZs9mZtFoaNFpac9pac9paWZmZs9mZs9mZs9oaNBpac9paWlmZs9mZtFoaNFpac9pac9paWZmZs9mZs9mZs9oaNBpac9pafv0ZmbPZmbPZmbPaGjQaWnPaWn8Wmlpz2lpz2lp0Who0WZmz2Zmz2ZmaWlpaWZmaWlpaWZmaWlpaWZmaWlpaWZmaWlpaWlpZmYFpGJiYmJiYmJiYmJiY15eXl5eXl5eXl5eYGBgYGBgYGBgYGBgZV5eXl5eXl5eXl5eYGFhYWFhYWFhYWFhZF5eXl5eXl5eXl5eYGNjY2NjY2NjY2NjYlxcXFxcXFxcXFxcYmNjY2NjY2NjY2NjXmBgYGBgYGBgYGBgB+tiYmJiYmJiYmJiYgElYGBgYGBgYGBgYGD+32JjXmBgZV5gYWReYGNiXGJjXmAH62IBJWAAAABDAAD+FAXVByUASQBNAFEAVQBZAF0AYQBlAGkAbQBxAHUAeQB9AIEAhQCJAI0AkQCVAJkAnQChAKUAqQCtALEAtQC5AL0AwQDFAMkAzQDRANUA2QDdAOEA5QDpAO0A8QD1APkA/QEBAQUBCQENAREBFQEZAR0BIQElASkBLQExATUBOQE9AUEBRQFJAU0BUQNBuQAAAVNAektri6v4BcvLBQkNERUFAQFofKnoGgXJyQcLDxMXBQNPb5Wv9AXPz0xsjKz5Bcxkf6XkHgXFxWl9pukbBcZTc4+z8AXT01BwkrD1BdBgg6HgIgXBwWWAouUfBcJXd5e37AXX11R0kLTxBdRch53cJgW9vWGEnuEjBb77vgELARsBKwFQAAUBOwE7tlh4mLjtBdi/AQgBFAEpAUgAKgAFATkBObddiJrdJwW6/0EdAQ8BIQEvAUwABQE/AT8A/AEMARwBLAFRAAUBPAEEARcBJQFEAC4ABQE1ATUBCQEVASYBSQArAAUBNrc0ODxARAVISL4BAAEQAR4BMAFNAAUBQLcyNjo+QgVGRkELAQUBGAEiAUUALwAFATIBMgFAATYBPEAKuti+1MLQxswDDbkBUgFTQAwcICQoLDAGGBbO0ta5AToBPrdHBsrKvcHFybkBNQE5tQYCvMDEyLkBNAE4tQYFBa+zt7kBKwEvtUQGq66ytrkBKgEut0MGqqqdoaWpuQElASm1BgacoKSouQEkASi1BgkJj5WXuQEbASG1QAaLjpSWuQEaASC3PwaKinx/g4e5ARQBF7UGCnt+goa5ARMBFrUGDQ1vc3e5AQsBD7U8Bmtucna5AQoBDrc7BmpqXGBkaLkBBAEItQYOW19jZ7kBAwEHQBgGERFPU1f7/zgGS05SVvr+NwZKStzg5Oi5AUQBSLUGEtvf4+e5AUMBR7UGFRXs8PS5AUwBUEAcNAb4AqsGiwprDksS+PgSSw5rCosGqwIKFs/T17kBOwE/t0gGy8sB6+/zuQFLAU9ADDMG9/caHiImKi4GFgAvFzMzERczLzMRFzMSFzkvLy8vLy8vLy8vERczMxEXMxEXMzMRFzMRFzMzERczERczMxEXMxEXMzMRFzMRFzMzERczERczMxEXMxEXMzMRFzMRFzMzERczERczMxEXMxDGFzIREgEXOREXMzMRFzMRFzMzERczERczMxEXMxEXMzMRFzMRFzMzERczERczMxEXMxEXMzMRFzMRFzMzERczERczMxEXMxEXMzMRFzMRFzMzERczERczMxEXMxEXMzMRFzMyERczMxEXMxEzMTABIREzNSMRMzUjETM1IxEzNSMRMzUjETM1MxUzNTMVMzUzFTM1MxUzNTMVMzUzFTM1MxUjFTMRIxUzESMVMxEjFTMRIxUzESMVMwEVMzUzFTM1MxUzNTMVMzUXIxUzJyMVMycjFTMnIxUzBxUzNTMVMzUzFTM1MxUzNQUjFTM3FTM1MxUzNTMVMzUFFTM1IRUzNQc1IxUlFTM1EzUjFSM1IxUjNSMVIzUjFQcVMzUzFTM1MxUzNTMVMzUTNSMVIzUjFSM1IxUjNSMVBxUzNTMVMzUzFTM1MxUzNRMjFTMnIxUzJyMVMycjFTMBIxUzJyMVMycjFTMnIxUzARUzNTMVMzUXIxUzJyMVMwcVMzUzFTM1ByMVMzcVMzUFFTM1FzUjFRc1IxUjNSMVBxUzNTMVMzUTNSMVIzUjFQcVMzUzFTM1EyMVMycjFTMTIxUzJyMVMwXV+itqampqampqampqamtqa2pram1ramtqamtra2tra2tra2tra2v6lWtqa2pram1ra2vYamrVamrVamrVa2pramtqbf3pampramtqbWv8qWsBP2vVawGqbWt
rbWpramtqa2tqa2pram1ra21qa2pramtramtqa2pta2tr2Gpq1Wpq1WpqAhdtbddra9Vra9VrawLsamtqampq1Gtr1Wprampra2pq/ldq1WrUampramprampqamtqamtqampq1Gtrampq1Wpq/hQBIWMBIGMBImEBIGMBIWIBIWBgYGBgYGBgYGBgYGDDYv7fXv7bXv7bXv7bXP7dYAZoXl5eXl5eXl6+Y2NjY2NjY2JeXl5eXl5eXr5hYWFhYWFhYcVeXl5eXl5eXl5e/t9jY2NjY2NjY2JcXFxcXFxcXP7fY2NjY2NjY2NeYGBgYGBgYGAGzWJiYmJiYmIBIGJiYmJiYmL+315eXl6+Y2NjYl5eXl6+YWFhYcVeXl5eXsNjY2NjYlxcXFz+32NjY2NeYGBgYAbNYmJiASBiYmIAAAABAHsA9gRaBNUAAwARtQMCBAUDAAAvLxESATk5MTATIREhewPf/CEE1fwhAAIABgAABM8EyQADAAcAHkAMBQMCBgMGCAkFAwQAAC8yLzMREgE5OREzETMxMBMhESETESERBgTJ+zdMBDEEyfs3BH37zwQxAAEAbQF/AmgDewADABG1AQAEBQECAC8vERIBOTkxMAEhESECaP4FAfsBfwH8AAAAAAIAbQF/AmgDewADAAcAHkAMBwEABAEECAkHAQYCAC8zLzMREgE5OREzETMxMAEhESEDESERAmj+BQH7S/6bAX8B/P5QAWL+ngAAAAABAAAAgQgAAukAAwARtQIFAwQDAAAvLxEBMxEzMTARIREhCAD4AALp/ZgAAAEBngAABkwErgACABG1AAIDBAABAC8vERIBOTkxMCEBAQGeAlgCVgSu+1IAAQGR/+UGWgSsAAIAE7cBAgADAwQCAAAvLxESARc5MTAJAgGRBMn7NwSs/Z79mwAAAQGe/+UGTASTAAIAEbUCAAMEAQIALy8REgE5OTEwCQIGTP2q/agEk/tSBK4AAAAAAQGR/+UGWgSsAAIAEbUCAQMEAQAALy8REgE5OTEwAREBBlr7NwSs+zkCZQACAKgAogQtBCkADwAfAB5ADBAACBgAGCAhFAwcBAAvMy8zERIBOTkRMxEzMTATNDY2MzIWFhUUBgYjIiYmNxQWFjMyNjY1NCYmIyIGBqh30Xh70Xl50Xt40XdWYKhiY6piYKxjYKpgAmR503l503l40Xl5zntiqmBgqmJjqmJiqAAAAAAQAGIAVgReBFIABwAPABcAHwAnAC8ANwA/AEcATwBXAF8AZwBvAHcAfwD8QJFYXFBoaFRsOHh4PHwocHAsdCBgYCRkCEhIDEwAQEAERBAwMBQ0GBwcNERMZHR8bFwJgIFKcnZOdtB24HYCQnp+Rn7QfuB+AjJqbjZusG4BGlpeHl4SUlYWVo9Wv1bPVgMCOj4GPv8+AQoqLg4udn5uXlY+Li4+Vl5ufnYHJmYwYkBiAmIiMHAmAS8mPyaPJgMmAC9dXRrJL13JERc5Ly8vLy8vLxEzEMkyXREzEMkyXREzEMkyETMQyTJxETMQyTJdETMQyTJdETMQyTIREgEXOREzETMzETMRMzMRMxEzMxEzETMzETMRMzMRMxEzMxEzETMzETMRMzEwARQjIjU0MzInFCMiNTQzMhMUIyI1NDMyFxQjIjU0MzIBFCMiNTQzMgcUIyI1NDMyARQjIjU0MzIBFCMiNTQzMgEUIyI1NDMyBxQjIjU0MzIBFCMiNTQzMgcUIyI1NDMyARQjIjU0MzIlFCMiNTQzMhMUIyI1NDMyJxQjIjU0MzID1zM3NzOTNDk5NPc3NTc1IzM3NzP+ODY1NTatNzU1NwJSNzU1N/0bNzY2NwKBMzc3M5M0OTk0/a40OTc2IzU4ODUBxzY1NTb+XDY3Nzb3NzU1N5M3NjY3A5Y2NjcrNTU3/tM3NzXjNTU1AZQ4ODVaNTU3/Xc1NTcBuTYzOv1DNTU4mjMzNwIdNzc14zU1Nf4ENzc24zU1N/7VNzM3KzU1OAAAAAABALIAiQQjA/oADQARtQoEDg8HAAAvLxESATk5MTABMhYWFRQAIyIANTQ2NgJqbdlz/v63tv7+b9cD+nXZarf+/gECt2zVdwACACkAAASsBIMAAwATAB5ADAQAAwwADBQVCAAQAQAvzS/NERIBOTkRMxEzMTAzESERARQWFjMyNjY1NCYmIyIGBikEg/wEd8t2dc13d8t3ds11BIP7fQJCd8t3d811dM13d80AAwApAAAErASDAAMAEwAjACdAEhQAAxwcDAQABCQlCCAQGAAgAQAvzS/dzhDOERIBFzkRMxEzMTAzESERATQ2NjMyFhYVFAYGIyImJicUFhYzMjY2NTQmJiMiBgYpBIP8UmCqYmGqYmKqYWKqYE53y3Z1zXd3y3d2zXUEg/t9AkJgqmJiqmBjqmBgqmN3y3d3zXV0zXd3zQACAHMBhQJiA3UADAAYACZAEhMGAA0GDRkaFgADEAMCAwMQCQAvMzMvXTMREgE5OREzETMxMAEUBiMiJjU0NjMyFxYHNCYjIgYVFBYzMjYCYpVjZpGTZGlGSUtnRkVnY0lOXwJ9a42QaGaSSkhmRmZmRkhkaAAAAAAFAbD/5QZ5BKwACwAYACQAMAA6AGtAExMGGR8lKwAMDDYrOh8GBjs8NTG4/8BAKQkMSDE2OAE4MzNACRBIKBwcLiIPIk8iXyIDMyIzIhYJFgMPHwkvCQIJAC9dMy8zERI5OS8vXREzMxEzKxEzXcYrMhESARc5ETMRMxEzETMxMAEUACMiACc0ACEgAAc0ACMiBwYVFAAzMgABFAYjIiY1NDYzMhYFFAYjIiY1NDYzMhYBFjMyNxcGIyInBnn+l/z7/pkCAWIBAgEDAWJa/s/a2ZeaATPX2gEx/VotISEtLSEhLQHTKyEhLy8hISv96UyTkkw9YLu4YgJI/v6bAWf8+gFq/pb62QEzmpnZ1/7MATQBVh8vLx8gLS0gHy8vHyAtLf6/iYkjuroAAAAEAdH/5QaaBKwACwAXACMALQBTQDQAGCgeDCQSBgguLyktAC0BIRsVDw8PTw9fDwMtJvArAQ8rAStADRBIKw8rDwkDHwkvCQIJAC9dLxI5OS8vK11dzs1dEM4zMl0RMxESARc5MTABFAAjIgAnNAAhIAAFNCYjIgYVFBYzMjYlNCYjIgYVFBYzMjYBFjMyNycGIyInBpr+l/z+/pwCAWIBAgECAWP9ADAeIS0tIR4wAdMuHiEvLyEeLv2uYri5Yj5LkpNMAkj+/psBZ/z6AWr+lnsgLS0gHy8vHyAtLSAfLy/+27q6I4mJAAAAAAIBRv9zBg4EOwApADUAcEA9CA8PMyQdHS0lHC0iHycaAhYWKRcFEgoNDTMHEAwQMxIXGh8cIAk2NyINHwMKEg8KCAUkJwcCMBgVKigpAgAvMxrJLzPJEhc5LxczERIBFzkRMxEzETMRMxEzMxEzETMRMzMRMxEzETMRMxEzMTABMxUWFhc3FwcWFzMVIwYHFwcnBgYHFSM1JicHJzcmJyM1MzY3JzcXNjcXIgYVFBYzMj
YnNCYDiUJBZTu6LbhWBtfXEEy4MbYyV1hCeWS8K7ZOENfXDFC0KbxvcB+LwcOJi8YDxQQ72QYnLbYtuHF0Pn1gvCu2JSoN2dkQSrQtuGR9PoFeuDG2Tgw9x4eHxciEh8cAAAIB2QBQBCcEgQAXACQAVEArEAoVGwMOEhIXEwoiIhMDAyUmERUVDhYNAAAeHx4vHgIWHhYeBkATARMYBgAvMy9dEjk5Ly9dETMRMxEzMxEzERIBFzkRMxEzMxEzETMzETMxMAEmJjU0NjMyFxYVFAYHFSEVIREjESE1IRMiBhUUFjMyNzY1NCYC23GJrnF3VFaSaAEA/wBM/v4BAiVYd3tUVjs+dwJCEqJofaZWVHlsog6mRv76AQZGApF4VVZ5Pj1UVncAAAAAAgFSAPoErgSBACwAOABGQCMXFAQfMCcfLCE2FB4eADYsJwU5Oh4AGggPLB8qMyQtJA8DKgAvFzMvMxI5OS/ExDk5ERIBFzkRMxEzETMRMxEzETMxMAEmJyY1NDc2MxcWMzI3NjMyFQcGFRQXFxQHByImJicHFhUUBiMiJjU0NjMyFwciBhUUFjMyNjU0JgQAkysJBgcIIUM8WCkiDw4EEAwEBA4VJSMO61SxcnWsqHtFVJlae31YWHt9BAArKwQOCQgEBBENDA4bO2NNNCAJBgZCWjHuUmx9rqR5eKorIHlaX3Z9WFh7AAEAOwAABAQEzwAhAClAFgYQCxcRHAYiIwsXFwkPGR8ZAhkZEQAALy85L10zOREzERIBFzkxMAEWFhcWFhcUBiMiJx4CFxchNzI2NjU1BiMiJjU0Njc2NgIhGGGVjUYCgVicZARQooUG/OoGe6xYWqpbgVhliYUEz2CojH+DR2F/v6CmXgglJWCskg6/f11ah1J3ugABADsAAAUEBMcAMwBDQCYnAB0fLgcTFwEOCjQ1KgsPCx8LAi4IHxMjEwgRDxEBCxELERoBGgAvLxI5OS8vXRI5OTIRMxEzXREzERIBFzkxMCEhNz4DNScGBiMiJjU0NjcyFyYnJjU0NjMyFhUUBzY3NjMyFxYVFAYjIiYmJx4DFwRG/LYIh3deNgM5sFpzopRcPWUlEguicXSgRVQQFidpQ0qcdDh2Xz0EMW9/cCMaOHeVTC95dZ16c50CM0InJCd5lqBrVmInBAhOS3V1pDJRaX2aeDYUAAABAGb/6QRaBHkAGAAYQAkHExkaDRAAChAALzMvEjkREgE5OTEwBSYmJycmJjU0NjMyFhc2NjMyFhUUBgcGBgJiFlqwW0s2jGRWjychj1hhj1hvjYEXVrfre2WBQWuJc3d3dYdjVr6Js9UAAAABAEL/5wPTBMcACwARtQkDDA0GAAAvLxESATk5MTABFgAXBgAHJgAnNgACBkoBCHtG/s9UK/76lXQBAgTHff6XiUb+aZRSAW2yiQFYAAAAAAEAxQAdAzsEgQAZAC5AFQgKAg4OGQUKChkUAxobFxGACAgRAAAvLzkvGhDNERIBFzkRMxEzETMRMzEwATMVFxYVFAcjNjU0JicRFAYjIiY1NDYzMhcB6UyabF4vOXJAk2s5OX1NKy8EgWTBk6qWeX95d6AK/gZ7lzctTnMTAAAAAgEQ/9UE8ASHABoAHgBCQCMbDQ0YABwKCgUYEwQfIAgDDBsLHAMZHB0bHgsMCBoWgBAdGgAvMy8azRIXOREzETMvzRESARc5ETMzETMRMzEwARQGIyI1NDYzMhcRBREUBiMiJjU0NjMyFxElASU1BQTwm19ze04vK/3ZiXM5OndKNi4Cu/2PAif92QFEf5RlUW8SAcCV/nZ0nDUtTHUTAvCy/meVdZgAAgBm/zcEAgXNABsAHwB1QEUNCRIfBgYPCwcWGgICExwDGAAAAwcJBCAhCAoLHwUEHAEaAAobCQ4MDx4dEhMWGRgKDRcJFxAQFwkDBxQDAAcBYAcBBxQALy9dcS8REhc5Ly8vEM0XORDNFzkREgEXOREzETMzMxEzMxEzMzMRMzMRMzEwAQcRIxEFESMRBzU3EQc1NxEzESURMxE3FQcRNwURBREEAslg/rZgycnJyWABSmDJycn+1/62AbxY/pwBPZ/+mQFAYJ9eAfZgoGABRv7hoAFc/stenmD+ClqBAfag/goAAQAUAAAD/gW2ABUAbEA6AxUVEwgMEBAFARMKDhIOEwMWFwsDBANsWQgPBAEJAwQADxUAFWxZDA8APwACCwMAABMGAxMQaVkTEgA/KwAYPxI5L19eXTMrEQAzGBDGX15dMisRADMREgEXOREzETMzMxEzMxEzETMxMBMzNSM1MxEzESEVIRUhFSERIRUhESMUs7OzuAFc/qQBXP6kAn/8ybMCWLaSAhb96pK2kf7dpAHHAAAAAQAUAAAB/AYUABMAX0AxEgICBAsHBwUQAAQEDQkFBRQVEwsMC15ZEA8MAQkDDAgDBwgHXlkAvwgBCAgFDgAFFQA/PxI5L10zKxEAMxgQxl9eXTIrEQAzERIBOREzMzMRMzMRMxEzETMRMzEwATMVIxEjESM1MzUjNTMRMxEzFSMBYpqatJqampq0mpoCjZH+BAH8kbeRAj/9wZEAAf/6AAAD/gW2AB0AWUAwDRUDAxIGBRsGAx4fAgcACRUSFxoJsBABDxAfEC8QAwkDEEANFwAABhMDBgNpWQYSAD8rABg/EjkvMzMazV9eXV0yMhE5ORESOTkREgEXOREzMxEzMjEwASInESEVIREmIyIGByM2NjMyFxEzERYzMjY3MwYGAdkfEgJW/PIYEywqDWgLZFUPI7gXFioqDmcLZAJOCP5OpAK4Czs8eowIAmz9MQo7PHiOAAIAFAAABG8FtgANABoAgEBNCAYOEgUFCgYAFhYQBgMbHBEICQhpWQ5tCQFFCVUJAhkJKQkCCAnYCQIPDwkfCS8JAyQDCQkGCwQSa1kABBAEAgkDBAQLBhILGmtZCwMAPysAGD8SOS9fXl0rERIAORgvX15dXl1dXV0zKxEAMxESARc5ETMRMzMRMzMRMzEwARQEISMRIxEjNTMRISABIRUhFTMyNjU0JiMjBG/+zv7qqLizswGDAiX9EAEr/tWT2sS2wboECODv/ccDqKABbv6SoNGNnI2MAAIAx/4UBNsFtgAIAB4AS0AnGxgaBAAJCRMYBAQPEwMfIBoeAB5rWQAAFBwSFAhpWRQDEQxpWREbAD8rABg/KwAYPxI5LysRADMREgEXOREzETMRMxEzETMxMAEzMjY1NCYjIxEUFjMyNxUGIyARESEgBBUQBQEjASEBf9uypKa60TpHMioxQv7eAZMBEAEF/tsBkdf+nv7dAviMiop/+kdXURWcGwFCBmDP0P7dZf1xAlwAAAQAXv5WA9cGFAAgACgALgAzAJBATxsgFCkHKQsFCCQrMiwxDhwZMycgAQEnGQ4sKwgLCDQ1MTMQLCsoJAgNBTMHMyhgWQ8zHzN/MwMdAzMzABcaAAAVHBcXEF5ZFxAFJF5ZBRYAPysAGD8rEQAzGD8/ERI5L19eXSsAGC8REjk5ERI5ORESORESARc5ETMRM
xEzETMRMxEzETMRMzMRMxEzMTAhJyMGBiMDIxMmJjUQJRMnIyIGByc2NjMyFxMzAxYWFREBIwcDNjY1NQUUFxMGBgE0JwM3A1QjCFKjfH+JhWtsAaRmEhJXm0Q3U8RgJiKLiZZkX/6sDw5/lqj99E1xXmACDkRWmpxnSf5qAagfoHQBKysBQgI0IIcsMgQBvP4pJaSI/RQCEgL+bQKmkWPqaywBZhVjAVGKO/7zBwAAAgAh/lYDRgYUAB0AIABqQDcYIgsOBBoQDhUeHg4JDBsfGhYWHwwOBCEiHiAADA8HCxcADxsgExITGQMVFSBkWRUPBwBdWQcWAD8rABg/KxEAFzMYLxEzMz8vERI5ERI5ERIBFzkRMxEzETMRMxEzETMRMxEzETMxMCUyNjcVBgYjIicDIxMmNREjNTc3MxUzEzMDFSMDFgMTIwIdI14YGWk2X0GJiaY2m51Ia7qRipgl8yxRj49/DgmKCxUc/k4CDlGKAn9WSOr8Acr+H3X8/DsBeQHGAAD//wDH/n8F1QW2AgYCqAAAAAEArv6DBO4GFAAaAEVAJBIQDAwNGgQBAgIEDQMbHBIEFgIiDgANFRYIXVkWEAQaXVkEFQA/KwAYPysAGD8/PxESORESARc5ETMRMxEzETMzMTAlESMRIxE0JiMiBhURIxEzERQHMzY2MzIWFREE7rOhd3+nm7S0CgwxtHHIypj96wF9Ar6Gg7rW/ckGFP44WkBQWr/S/c0AAQDH/n8FFwW2ABAARUAkCg4HAwMECwEODw8BBAMREgcBCwMCAgQJBQMEEg8iAAxpWQASAD8rABg/Pz8zEjkRFzMREgEXOREzETMRMxEzETMxMCEBBxEjETMRNwEzAQEzESMRBBv9+ZW4uH4CCdf9vQHhnLECuoP9yQW2/S+LAkb9g/1r/dsBgQAAAAABAK7+gwRgBhQAEwBKQCcNEQgHAwMEDgEREhIBBAMUFQEOCAMCAgAMEiIFAAwPBBUAD11ZABUAPysAGD8/Pz8REjkRFzMREgEXOREzETMRMxEzMxEzMTAhAQcRIxEzEQczNzcBMwEBMxEjEQNa/oN9srIICD1GAV/S/kQBZqKyAgBt/m0GFPzTsk5UAXP+K/4j/esBfQAAAAEATv5/BEQFtgALAENAIgIKBgkDAAkKCgcAAwwNCiIGAwQEA2lZBAMBBwAAB2lZABIAPysREgA5GD8rERIAORg/ERIBFzkRMxEzETMRMzEwMzUBITUhFQEhESMRTgMC/RYDyfz+AxexiwSHpIv7ef3bAYEAAQBQ/oMDcwRKAAsAPUAgBgIJCgoCBwMABQwNCiIGAwQEA2RZBA8BBwAAB2RZABUAPysREgA5GD8rERIAORg/ERIBFzkRMxEzMTAzNQEhNSEVASERIxFQAk791QLx/bsCVLJ3A0eMh/zI/fgBfQAAAAIAff/sBVoFywAOACIAOkAdAyAWChMYIBgjJBMZFxQDFxIPAGlZDwQcBmlZHBMAPysAGD8rABg/PxI5ORESATk5ETMzMxEzMTABIgIREBIzMjY2NRE0JiYnMhYXMzczESMnIwYGIyIkAjUQAALjx9/eyp7EW1zGmZDuOwofkZEfCjnlpbv+7JEBRwUr/sn+5/7r/sVctqABPKG2W6BuY7z6SrxgcLYBVuUBYQGNAAAAAQAAAAAEMQReABgAIkAQCxkDGhIKCw8ABV1ZABAKFQA/PysAGD8SOREBMxEzMTABMhcVJiMiBgcBIwEzEx4DFzM2NxM2NgO8QzIlGCMwFP7Xzv5qwd8LGxkUBQgLNLkeWgReGIUKNjn8pARK/XkiUU9IGTiiAj5dSQAAAAEAGQAAB6gFwwAjACpAFR0JJCUEDRQDCBAJAwEIEhofa1kaBAA/KwAYPzM/MxIXORESATkzMTAhIwEmJwYHASMBMxMWFzY3ATMBFhc2NxM2NjMyFxUmIyIGBgcFx7v+7j8LEDb+7Lr+fcDjLhgWOAECvgECNhoTNbIbcmE6JBgjJScdCgO+1ktztPxIBbb8g6+tpMMDcvyHuqaQzgLHZ1oRkwoVLCcAAAEAFwAABnsEXgAoACpAFSQKKSoEDhoDCRMKDwEJFSEmXVkhEAA/KwAYPzM/MxIXORESATkzMTAhIwMmJyMGBwMjATMSEhczNzY3EzMTHgMXMzY3EzY2MzIXFSYjIgcFAtO8GjIIKiDFzP7TumhtCggOHx3DxL0KFxQQBAkIO2cSYFJDMiUZTBoCak3Ww2L9mARK/mv+Wlc+j1oCa/2VI09NSR1H/wG4VFIYhQpvAAAAAgAUAAAEEARoAAoAJABeQDIiJgASAxAbCxgFBQsQEg0FJSYdJCEbFQ0DCA4NDl1ZDw0BEgMNDSQVIQ8VCGRZFRAkFQA/PysAGD8REjkvX15dKxESADkREjkREjkREgEXOREzETMRMxEzETMxMAEUFhc2NTQmIyIGAwYjNTI3JjU0NjMyFhUUBgcSFzM2NxMzASMBGRMegzIoJzM+T3hrKTOOcnGLf3dkGQgRS/TA/lTOA38nV09Ahys5Of5UD5YGkFpwiYBpdLI0/vZ7VM8Ch/u2AAABAMcAAAPyBbYABwA7QCIGAgIDAwAICQYBaVk4BgGaBgFpBgEwBgGQBgEGBgMEAwMSAD8/EjkvXXFdXXErERIBOTkRMxEzMTABIREjETMRIQPy/Y24uAJzAqr9VgW2/ZYAAAAAAQCuAAADeQRKAAcAS0AtBgICAwMACAkGAV1ZBAYB9AYBBrUGAQOPBgFNBl0GAn0GAQW/BgEGBgMEDwMVAD8/EjkvXV9dcV1fXV9dcSsREgE5OREzETMxMAEhESMRMxEhA3n96bS0AhcB6f4XBEr+NwAAAAACAHH/7AVcBF4AEwAdADtAHgUICAIbChEUFAoCAx4fBBAOF11ZDhAbCgAKYVkAFgA/KxEAMxg/KwAYPxESARc5ETMRMxEzETMxMAUgERA3FwYGFRAFETQ2MzISFRAAEzQmIyIGFRE2NgLX/ZrRi1lPAV6qmrnc/qyeeGVHT7DDFAI/ASr/YHXfe/6DIwJetsX+2vn+5P7JAlG41HJy/aAQ5gAAAAIAIQCYApMD7AADAAcALEAWAwcBBQcFCAkCBAIEXwZvBgIGDwABAAAvXS9dOTkvLxESATk5ETMRMzEwARcBJyUXAScCSkn910kCKUn910kD7G3+hW0Obf6GbAAAAv+TBSEBaAdgAAMADwAtQCEHHw0vDU8NXw3PDf8NBg1AAQEBHwAvAE8AXwDPAP8ABgAAL13NXS9dMzEwExEzEQE0NjMyFhUUBiMiJtOV/is/LCs/OjAsPwUhAj/9wQEjOzc3OzY9OAAC/5MEewHLBrYAAwAPACVAGQcfDS8NTw1fDc8N/w0GDQHALwNPA88DAwMAL10azS9dMzEwAwEXAQM0NjMyFhUUBiMiJjUBl2n+aqI/LCs/OjAsPwTjAZhp/mkByTs3Nzs2PTgAAv7fBNkBHwa2AAMADwAjQBgHDw0fDS8NTw1fDc8N/w0HDQAPA18DAgMAL10zL10zMTABIRUh
EzQ2MzIWFRQGIyIm/t8CQP3AtD8sKz86MCw/BW+WAWs7Nzc7Nj04AAAAAf7wBMMBEAYXAAUAELcDAQ8AXwACAAAvXTIyMTABNSE1MxH+8AG0bATDbOj+rAAAAQCPBKwDVAc7AAYAHUAOAwQBBAYDBwgCAAQDBgMAPxczERIBFzkRMzEwAQEhESMRIQHyAWL+65v+6wc7/nv+9gEKAAABAI8EjwNUBx8ABgAdQA4FAgYCAQMHCAIABAMGAwA/FzMREgEXOREzMTABASERMxEhAfL+nQEVmwEVBI8BhQEL/vUAAAIAkwKgAZEG9AADAA8AJkARAgQDCgQKEBEqAQEBBwICBw0ALzMzLxI5XRESATk5ETMRMzEwASMDMwM0NjMyFhUUBiMiJgFOdTPb7kE+PkFCPT1CBFgCnPw3QkdJQD9MSgAAAAACAJMCtAGRBwgAAwAPACRAEAoDBAIDAhARJQABAAcHDQMAL8QyEjldERIBOTkRMxEzMTATMxMjExQGIyImNTQ2MzIW13Uz2+1DPDxDRDs2SQVQ/WQDyUZDSEFAS0IAAAACAJMBWAGRBcsAAwAPACVAEQoDBAIDAhARJQABAAcHAw0EAD/EMxE5XRESATk5ETMRMzEwEzMTIxMUBiMiJjU0NjMyFtd1M9vtQzw8Q0Q7NkkEEv1GA+dGQ0hBQUtCAAAB/vAEwwEQBhcACQAXQAoEAAgBCAIGCAYJAC8zMxEzL10zMTABFSMVIzUjFSMRARC+bYlsBhdt5+fnAVQAAAAAAf7wAAABEAFUAAkAErYDCAUBCAEAAC8yMhEzLzMxMCE1MzUzFTM1MxH+8L5tiWxt5+fn/qwAAP//APn+UwOt/5sBBwFL//X5egAdtADQDQENuP/Asw8SSA24/8C0Cg5IDSMAPysrXTUAAAAAAgCTALABkQRmAAsAFwAmQBIMABIGAAYYGQkDfVkJDxV9WQ8ALysAGC8rERIBOTkRMxEzMTATNDYzMhYVFAYjIiYRNDYzMhYVFAYjIiaTQTw9REQ9O0I/Pj9CRD07QgE7QkhIQkBLSgLhQklIQ0BLSgAAAAACAGYBdQMtA6AAAwAHACJADwQCBwECAQgJAwACAQIEBQAvM8ZdMhESATk5ETMRMzEwARUhNRE1IRUDLf05AscCBpGRAQiSkgABAKYBnAGBBbYAAwAStgIDBAUBAgMAP80REgE5OTEwASMDMwFOdTPbAZwEGgAAAAABAKYDNQGBBbYAAwAStgIDBAUBAgMAP80REgE5OTEwASMDMwFOdTPbAzUCgQAAAAAD/t8EzwEtBvgACwAXAB8AO0ApQBpQGmAasBrAGtAaBmAacBqAGgMagAAfEB8wHwMfHwkVFQMPD18PAg8AL10zMxEzMy9dGsxxcjEwARQGIyImNTQ2MzIWBRQGIyImNTQ2MzIWJzY3MxUGByMBHzMuLjI6Jik4/n84Jy4yOiYnOA2VMNc56nkFMzA0Ni41MjI1NS82LjUyMn+tZBVZuwAAA/7TBM8BHwb4AAsAFwAfADtAKUAcUBxgHLAcwBzQHAZgHHAcgBwDHIAAGRAZMBkDGRkJFRUDDw9fDwIPAC9dMzMRMzMvXRrNcXIxMAEUBiMiJjU0NjMyFgUUBiMiJjU0NjMyFjcjJic1MxYXAR8zLi4yOiYpOP5/OCcuMjomJzjReeo51zCVBTMwNDYuNTIyNTUvNi41MjJnu1kVZK0A////+gYUBAYGnAIGAHEAAAAB/OUEsgAKBjMACQAXQA0FBQAPCV8JfwnPCQQJAC9dMzMvMTATIyIEByM2JCEzCgrr/q5OkGABmQEiCgWgenS9xAAAAAH/9gSyAxsGMwAJABdADQUFCQ8AXwB/AM8ABAAAL10yMi8xMAMzIAQXIyYkIyMKCgEjAZZikE/+r+sKBjPEvXR6AAAAAfzsBNsAAAXhAAsAOUAnygsBDwsBC8oAAQ8AAQAABhAGIAYDBgYDDwgBDwgfCC8IXwjPCAUIAC9dcTMzL10uXV0uXV0xMBEmJiMiByMSITIWF7WqT98faC4BPHTPZwUbJxZ9AQYmFwABAAAE3QMUBeMACgA/QC3KCgEPCgEKygABDwABAA8FHwUvBQMFBQMQCCAIQAhQCHAIoAjgCPAICA8IAQgAL11dMzMvXS5dXS5dXTEwERYWMzI3MwIhIiVn0nXfH2gu/sak/vgFpBcnff76PgABAKAAAAO2BYEACQAkQA8ABwEEAQoLCAUCBwUBBAUALzMvEjk5EMQREgE5OREzMzEwISMRASE1IQERMwO2h/6k/s0BZAErhwOeAVyH/tUBKwAAAAABAKAAAAO2BYEACQAkQA8ABwEEAQoLCAUCBwUBBAUALzMvEjk5EMQREgE5OREzMzEwISMRASE1IQERMwO2h/6P/uIBZAErhwLVAiWH/kYBugAAAAABAKAAAAO2BYEACQAkQA8ABwEEAQoLCAUCBwUBBAUALzMvEjk5EMQREgE5OREzMzEwISMRASE1IQERMwO2h/6F/uwBbgEhhwGgA1qH/VoCpgAAAAABAKAAAAO2BYEACAAgQA0ABgMGCQoHBAYEAQMEAC8zLxI5EMQREgE5OREzMTAhIwEhNSEBETMDtof+f/7yAW4BIYcE+of8RAO8AAAAAQBOAAADtgWBAAgAIEAOAAEEAQkKAgYDAwcBBQcALzMvEhc5ERIBOTkRMzEwISMRAQE3AQEzA7aH/rj+Z1wBPQFIhwTH/swBhmD+1QEzAAABAFIAAAO2BYEACQAiQA4ABwEEAQoLAwYGCAEFCAAvMy8SOS8zERIBOTkRMzMxMCEjESEBNwEhETMDtof+lP6PYgFIATOHA6gBb2D+uAFSAAEATgAAA7YFgQAHACBADQAFAQMBCAkCBQYBBAYALzMvEjk5ERIBOTkRMzMxMCEjEQE3AREzA7aH/R9eAoOHAkoCzV79mQJzAAEATgAAA7YFgQAJACBADQAHAQQBCgsCBwgBCAUALzMvEjk5ERIBOTkRMzMxMCEjNQEBNwETETMDtof+uP5nUgG024fsAqUBhmr+aP4wA2gAAQBGAAADtgWBAAgAHEALAAYDBgkKBgcBBwQALzMvEjkREgE5OREzMTAhIwEBNwETETMDtof+pv5xWgGu4YcDogF5Zv5r/ZsD+gAAAAEARgAAAnEFgQAHACBADQAFAQMBCAkCBQYBBgQALzMvEjk5ERIBOTkRMzMxMCEjEQE3AREzAnGK/l9aAUeKA54Bf2T+1wEpAAEANQAAA7YFgQAIACBADgABBAEJCgIGAwMFAQcFAC8zLxIXORESATk5ETMxMCEjEQEBNwEBMwO2h/60/lJrAUcBSIcEef3XAuNO/d8CIQAAAQA1AAADtgWBAAkAIkAPAAcBBAEKCwIGAwMFAQgFAC8zLxIXORESATk5ETMzMTAhIxEBATcBAREzA7aH/rT+UmsBYAEvhwOi/rAC40z9sgEtASEAAQBQAAADtgWBAAkAIkAOAAcBBAEKCwMGBgUBCAUALzMvEjkvMxESATk5ETMzMTAhIxEhATcBIRE
[... long run of base64-encoded binary data omitted (appears to be embedded font glyph-table data inside the minified chunk); not human-readable ...]
y5GMTdlLdgEaP3hiv4nhswObJyQAYqSklr+5WJgNykUIBnKAAAAAAIAAP/nBMME+gApAC0AAAEiJwYHFhYXBy4CJyYmNTQ2NjMyFhc2NjU0JichNSEVIRYVFAcWMyEVFwEnAQMqtmk+dlTifW5y3qooGBEbOy0zTR8kJhQO/mUEj/2xIhkoSwG1Uv5yYgGeAmwZTDNs4GVuY+HKQik4IBsxIC8qIHJNPWMekpJPcF9IBJJ8/uyKAQEAAP//AAD93AVYBPoCJgp4AAABBwm4BCIAAAAAAAAABABf/3oFDgUPAEAATQBRAFUAAAEGBiMiJicOAhUUHgIzMjcmJjU0NjMyFhYVFAYHFhcHJicGIyImJjU0NjY3JiY1NDY2MzIWFhUUBgcWMzI2NwU2NjU0JiYjIgYVFBYBNSEVAycBFwSwTsVsWb1MSlMrFDBUQQ0bDQlJPDZRJzIwSmB3hDchRG2sXjxjUVhfSpNlVIxVVF5IbW/KZv0iUEofRDRHVlECFgEu+XsBVl4CsSstJB8tSE4wHkAzIgMSKA45QDJLJjlDE2BZVZhbBlORWEVzWTA5lF1IdUQ0bk9YiEISPkM5M2o+ITgiSDw+bgEQkpL7p28BXHsAAwAA/7AEsgT6AAMAJQApAAARNSEVASInFhYVFAYHFhYXByYmJyYmNTQ2MzIWFzY2NTQmJzchFQMnARcEfv4dgj54c25jL5RUQ1rMXoV5OjQtWTpQX7m4MQN84HYBVlIEaJKS/o4JPpxmY4scHT8ajyN5SwM/PjA6LTIKW0NmlzeKkv13dQFcfgAAAP//AAD+ywTuBPoCJgqdAAABBwmnBCEAVQAAAAD//wAA/w8FbwT6AiYKngAAAQcMHwMgAV0AAAAA//8AAP52A3oE+gImCp8AAAEHCacD5QAAAAAAAP//AAD+ywOyBPoCJgqgAAABBwmnBFMAVQAAAAD//wAA/NoFsgT6AiYJhgAAACcL1ASyAAABBwmnA1MA1AAAAAD//wAA/dwFsgT6AiYLtgAAACcL0wTwAAABBwmnA0UBrwAAAAD//wAA/ssECgT6AiYKowAAAQcJpwTBAFUAAAAA//8AAP3cBbkE+gImChcAAAAnChAE5wAAAQcJpwMsAJkAAAAA//8AAP60BS4E+gImCqUAAAEHCacEfQA+AAAAAP//AAD+YwVkBPoCJgqmAAABBwmnA9H/7QAAAAD//wAA/qsFIwT6AiYKpwAAAQcJpwRjADUAAAAA//8AAPzaBIYE+gImCYwAAAAnC9QESgAAAQcJpwMIAL8AAAAA//8AAP3cBKUE+gImC7gAAAAnC9MEaQAAAQcJpwL9AdwAAAAA//8AAPzaBNEE+gImCY0AAAAnC9QElQAAAQcJpwMVAKkAAAAA//8AAP3cBMwE+gImC7kAAAAnC9MEaQAAAQcJpwL9AdwAAAAA//8AAPzaBSME+gImCY4AAAAnC9QEsgAAAQcJpwNTANQAAAAA//8AAP3cBSwE+gImC7oAAAAnC9ME8AAAAQcJpwNFAa8AAAAA//8AAPzaBL4E+gImCY8AAAAnC9QEggAAAQcJpwMQAL0AAAAA//8AAP3cBKUE+gImC7sAAAAnC9MEaQAAAQcJpwL9AdwAAAAA//8AAP6UBMAE+gImCrAAAAEHCacEUgAeAAAAAP//AAD+/AMsBPoCJgqxAAABBwmnBBsAhgAAAAD//wBf/ssEEwUOAiYKsgAAAQcJpwTVAFUAAAAA//8AAP3cBTYE+gImCmgAAAEHC7UEAAAAAAAAAP//AFf+rQPcBQ8CJgq0AAABBwmnBJkANwAAAAD//wAA/nYDaAT6AiYKtQAAAQcJpwPmAAAAAAAA//8AAP7LA30E+gImCrYAAAEHCacEIQBVAAAAAP//AAD+ywUABPoCJgq3AAABBwmnBCEAVQAAAAD//wAA/ssDhAT6AiYKuAAAAQcJpwROAFUAAAAA//8AX/52BJcFDgImCrkAAAEHCacFLAAAAAAAAP//AAD+dgPABPoCJgq6AAABBwmnBG4AAAAAAAD//wAA/ssDlwT6AiYKuwAAAQcJpwRcAFUAAAAA//8AAP3cA4wE+gImCisAAAAnChADzgAAAQcJpwQzAe4AAAAA//8AAP6VBIEE+gImCr0AAAEHCacFAQAfAAAAAP//AAD93AXZBPoCJgotAAAAJwoQBdgAAAEHCacEIQC4AAAAAP//AAD+ywNwBPoCJgq/AAABBwmnBD8AVQAAAAD//wA5/t0EWAUPAiYKwAAAAQcJpwQaAGcAAAAA//8AAP7LA5EE+gImCsEAAAEHCacEKwBVAAAAAP//AAD/0wTDBPoCJgrCAAABBwwfAyABXQAAAAD//wAA/dwFWAT6AiYKeAAAACcJuAQiAAABBwmnAwn/mwAAAAAAAQAA/dwEXgT6AFEAAAUuAjU0NyY1ND4CMyE1ITUhFSMRISIGBhUUFhc2MzIeAhUUByc2NjU0JiMiBhUUFhYXNjMyFhYVFAYGIyImJic3HgIzMjY1NCYmIyIGBwHNfJRHc1syVW5TAQ79HARe1f5IQD8mLyheZWyZYi3rMj5AfIKRkj58UjY3ZZNNV6ZygNi1Vn5Vjp5kbGwuSysvVzNmRIGQWXxMUXA/Vzgar5KS/r8QLiMmPRMWK0deM8FJhBE4MEE8UlA9YV0qCkiBVlOBR1KWcFdreDpOSys7HRMYAAABAAD93ASgBPoAVgAAAQYjIiYmNTQ2NyYmNTQ3JjU0PgIzITUhNSEVIxEhIgYGFRQWFzYzMh4CFRQHJzY2NTQmIyIGFRQWFhc3Mh4GFwcuAiMiBhUUHgIzMjY3AvhxcWWUTkdCc29zWzJVblMBDv0cBF7V/khAPyYvKF5lbJliLesyPkB8gpGSKHBkFBxmVE1JTElLKn1QkqdiZW0cLjwfLlQ3/gsvSIRVSHcmTapvfExRcD9XOBqvkpL+vxAuIyY9ExYrR14zwUmEETgwQTxSUDBVYjUBChchLD1NYUBQeJZQTkQnNSEOEhoAAAMAAP5HBIUE+gBIAEkASgAABQYjIiYmNTQ2NjcmJiMiBhUUFhYXBy4CNTQ2NyYmNTQ+AjMhNSE1IRUjESEiBgYVFBYXNjMyFhYVFAYHBhUUHgIzMjY2NwEBBH9tfGSUTzNwTgR+gZCTP56ubre6UENANjUyVW5TAQ79HASF/P5IQD8mMytTaX62YAgD6RwuPB8oODwk/pkBO7AzRH1RNWlWEE5Sg4BNjK+Wa5vYwWtWkzQsZjw/Vzgar5KS/r8QLiMnPxMZTYtVJkMOD40hLhwNCRUSBRz8TwAAAAADAAD+AATNBPoAWgBbAFwAAAEGBiMiJiY1NDcmJjU0NjY3JiYjIgYVFBYWFwcuAjU0NjcmJjU0PgIzITUhNSEVIxEhIgYGFRQWFzYzMhYWFRQGBwYGFRQWMzI2NxcGIyInBgYVFBYzMjY3AQEEzTBzRGqUSR83ODFwUAR+gZCTO3lrbH6QSUNANjUyVW5TAQ79HASF/P5IQD8mMytTaX62YAgDdXRZTD1WLTRtfCUh
DwtVUTZSN/5JASf+MhUdRnlOOzAkaT4wYlAOTlKDgEuFjmFucrK3aFaTNCxmPD9XOBqvkpL+vxAuIyc/ExlNi1UmQw4JRTw1OBYVhjMFECIUNjgSFgZE/E8AAAAAAgAA/dwEXgT6AE8AXwAABS4CNTQ3JjU0PgIzITUhNSEVIxEhIgYGFRQWFzYzMh4CFRQHJzY2NTQmIyIGFRQeAhc2MzIWFhUUBgYjIiYnNxYWMzI2NTQmIyIGByUyFhYVFAYGIyImJjU0NjYCAZGqUHNbMlVuUwEO/RwEXtX+SEA/Ji8oXmVsmWIt6zI+QHyCkZIcQG5QU0ZklE9UmGGV5m1fVax3YGdkVi1mJf6LHjMgITMdHjMgHzSCS4qXXnxMUXA/Vzgar5KS/r8QLiMmPRMWK0deM8FJhBE4MEE8UlArRUdNKBFIg1RSgkdgZmNNSk9KPkUVEIQfOCAhOB4fNyEgOB8AAAAAAgAA/dwFGAT6AFMAYwAAAQYjIiYmNTQ2NyYmNTQ3JjU0PgIzITUhNSEVIxEhIgYGFRQWFzYzMh4CFRQHJzY2NTQmIyIGFRQWFhc2MzIeAhcHLgIjIgYVFB4CMzI2NwEyFhYVFAYGIyImJjU0NjYDcHFxZZROKSiWjHNbMlVuUwEO/RwEXtX+SEA/Ji8oXmVsmWIt6zI+QHyCkZIte241QGCmj4pUfVCSp2JlbRwuPB8uVDf9Wh4zICEzHR4zIB80/gsvSIRVN18lWbt8fExRcD9XOBqvkpL+vxAuIyY9ExYrR14zwUmEETgwQTxSUDJZZjgOMVqPf1B4llBORCc1IQ4SGgFEHzggITgeHzchIDgfAAD//wAA/hEEhQT6AiYK7wAAAQcMHwMJ/5sAAAAA//8AAP4ABM0E+gImCvAAAAEHDB8DCf+bAAAAAAABAAD93ASQBPoAVQAABS4DNTQ2NyY1ND4CMyE1ITUhFSMRISIGBhUUFzYzMhYVFAYHJzY2NTQnBSclJiMiBhUUFhYXNjMyFhYVFAYGIyImJic3HgIzMjY1NCYmIyIGBwH/cp9UJDs0VzJVblMBQPzqBJDV/hZAPyZJYnnd5i8vhS4mCv6iXQFcTYWXmDJ8aEVGZZNNV6ZygNi1Vn5Vjp5kbGwuSysvVzOBOnhrZjxFaidQbT9XOBqvkpL+vxAuI0EtHbWjPoc7UTdYNBwe8o3AIV5aM15lMxBIgVZTgUdSlnBXa3g6TksrOx0TGAAAAAABAAD93AS+BPoAVgAAAQYjIiYmNTQ2Ny4CNTQ2NyY1ND4CMyE1ITUhFSMRISIGBhUUFzYzMhYVFAYHJzY2NTQnBSclJiMiBhUUFhc2MzIeAhcHLgIjIgYVFB4CMzI2NwMWcXFllE4/O11sKDs0VzJVblMBQPzqBJDV/hZAPyZJYnnd5i8vhS4mCv6iXQFcTYWXmIF8Ghpgpo+KVH1QkqdiZW0cLjwfLlQ3/gsvSIRVRHImPnxvPEVqJ1BtP1c4Gq+Skv6/EC4jQS0dtaM+hztRN1g0HB7yjcAhXlpTiUADMVqPf1B4llBORCc1IQ4SGgACAAD/5wSeBPoANgA3AAABIRYVFAc2MzIWFhUUBgcnNjY1NCYjIgYHBgYHFhYXBy4FNTQ2NjMyFhc2NjU0JichNSEhBJ798SIHMTRMgUtDPpQzPUk/IkIrKYZmU/OMblWus3UvDxs7LTRNH09JEhD+FgSe/UwEaE9wJhoVRX9TXLFKVTWFO0ZPEBc5XSpj421uRqC6kFA0HRsxIC8qKXlSKF8ikgAAAAACAAD/5wZpBPoARQBGAAABBiMiJiY1NSYnBgYHFhYXBy4FNTQ2NjMyFhc2NjU0JichNSEVIRYVFAcWFzY2MzIeAhcHJiYjIgYGFRQWMzI2NwEEr1ZeUIJMRFMue0dT84xuVa6zdS8PGzstNE0fT0kSEP4WBmn8JiIXOzIojFpdmX9nLIJgtW05TSRNRR07JP1tAWciSIVUCSYHMEcdY+NtbkagupBQNB0bMSAvKil5UihfIpKST3BMPg0XQENCcphXRbemKEEnRUcJDwMLAAAAAwAA/dwEiAT6AFAAUQBSAAAFNjcmJwYGIyIuAzU0PgQzMzUhNSEVIREjIgYGFRQWFjMyNyYmNTQ2MzIWFhUUBgcWFxYWFRQGBiMiJiYnNx4CMzI2NTQmJiMiBgcDEwJfTVo0FhJAEV+ne08qGTtYbYpQCv2kBEb+u2OepldJl28PHgcFTTw5UygxOCBEaXRXpnKA2LVWflWOnmRsbC5LKy9XMzjxGSMKVDADBipKYntNOV9bQysXw5KS/q02cFZKcUADEigOP0QwSSg4TBY/ZxiSalOBR1KWcFdreDpOSys7HRMYBZv7HAAAAAACAAD93AVuBPoAUwBUAAABBiMiJiY1NDY2MzMmJwYGIyIuAzU0PgQzMzUhNSEVIREjIgYGFRQWFjMyNyYmNTQ2MzIWFhUUBgcWFx4CFwcuAiMiBhUUHgIzMjY3AQPGcXFllE5VoGkJLxkSQBFfp3tPKhk7WG2KUAr9pARG/rtjnqZXSZdvDx4HBU08OVMoMTgzPViYgEd9UJKnYmVtHC48Hy5UN/7K/gsvSIRVTYFLTDYDBipKYntNOV9bQysXw5KS/q02cFZKcUADEigOP0QwSSg4TBZfWBtpjWxQeJZQTkQnNSEOEhoGYAAAAAQAAP3cBJIE+gBMAE0ATgBPAAAFDgMVFB4CMzI2NxcGBiMiJiY1NDY3JicGBiMiLgM1ND4EMzM1ITUhFSERIyIGBhUUFhYzMjcmJjU0NjMyFhYVFAYHFhcBARcDkylGNxwcLjsgOV5RNEiFRGWUTmplKCUSQBFfp3tPKhk7WG2KUAr9pARG/rtjnqZXSZdvDx4HBU08OVMoMThCXP53AXUPXQsdLDglIzIhDxkqjiQiSIRVWoslRE0DBipKYntNOV9bQysXw5KS/q02cFZKcUADEigOP0QwSSg4TBZ7fAVA+yqH//8AAP/TBJ4E+gImCvcAAAEHDB8DIAFdAAAAAP//AAD/0wZpBPoCJgr4AAABBwwfAyABXQAAAAAABAAA/dwEiAT6AE0AXQBeAF8AAAU2NyYnBiMiLgM1ND4EMzM1ITUhFSERIyIGBhUUFhYzMjcmJjU0NjMyFhYVFAYHFhcWFhUUBgYjIiYmJzceAjMyNjU0JiMiBwEyFhYVFAYGIyImJjU0NjYBAQKcKTwiIjk4XKF4TioZO1htilAK/aQERv67Y56mV0iVbBUeBwVNPDlTKDE4LzBrd0uRY3O8mUx+SHSAUVNeTkRKRf4fHjMgITMdHjMgHzQBiwEDDhIKPkkJKkpifU45X1tDKxfDkpL+rTZwVkpxQAMSKA4/RDBJKDhMFllGEpdyU4FHU5RxWG53OVBJPkUdAQ4fOCAhOB4fNyEgOB8Ef/scAP//AAD93AVuBPoCJgr6AAABBwmnA0sBAAAAAAD//wAA/dwEkgT6AiYK+wAAAQcJpwOiAJwAAAAA///+QAAAAjAHLAImCasAAAEHCW8CsgBaAAAAAAAB/kAAAAJvByw
ALwAAASYmNTQ2NjMyFzY2MzIWFwcmJiMiBhUVFx4CFzMVIxEjESM1My4CIyIGFRQWF/6IJCRLkWOhbCaGXURsKi0jTS1QWRUJEhEH7NWltqkhTWlGUVcfJQTrR4RFWIpPi0ZFHRaHExlaTRA3GDhBJZL7mARoko21XmRZOHBKAAAAAAL+QAAAAm8HLAAvADsAAAEmJjU0NjYzMhc2NjMyFhcHJiYjIgYVFRceAhczFSMRIxEjNTMuAiMiBhUUFhcBMhYVFAYjIiY1NDb+iCQkS5FjoWwmhl1EbCotI00tUFkVCRIRB+zVpbapIU1pRlFXHyUCoik9PSkpPT0E60eERViKT4tGRR0WhxMZWk0QNxg4QSWS+5gEaJKNtV5kWThwSgFGOysrOzsrKzsAAAAE+9wE+v/7BywADgAeAB8AIAAAAQYGIyImJzcWFjMyNjY3BTIWFhUUBgYjIiYmNTQ2NgMB/v0ovp6NzkKRMHxeQlk5FwEoHjMgITMdHjMgHzTI/t4G6Ly7vLsxlIc/d2igHzggITgeHzchIDgf/n4CMgAAAAL7yATrAF4HLAAhACIAAAEmJwYjIiYnNx4CMzI2NjcXNjYzMhYXByYmIyIGFRQWFyf+rU0SXZSPwUWRIUZcPUFWNRV3Kms+RGwqLSNNLVBZMTmxBOuCaWW1wjFneDxCeGQrHh0dFocTGVhNO39UDwAD+8gE6wBeBywAIQAtAC4AAAEmJwYjIiYnNx4CMzI2NjcXNjYzMhYXByYmIyIGFRQWFxMyFhUUBiMiJjU0NgP+rU0SXZSPwUWRIUZcPUFWNRV3Kms+RGwqLSNNLVBZMTlkKT09KSk9PewE64JpZbXCMWd4PEJ4ZCseHR0WhxMZWE07f1QBRjsrKzs7Kys7/skAAAAAA/wzBOv/3wcsABkAKQAqAAABLgIjIgcGIyImJic3HgIzMjc2MzIWFhcTMhYWFRQGBiMiJiY1NDY2A/6kGDFDNRsbGhtQcV4mhx8yQzQbISIlT25XJi8eMyAhMx0eMyAfNKwE62lpLQIDMXprMVRDHgQDSKyfAhMfOCAhOB4fNyEgOB/9/AAAAAL8MwTrAGUHLAAoACkAAAEuAiMiBwYjIiYmJzceAjMyNzYzMhc2NjMyFhcHJiYjIgYVFBcWFyf+pBgxQzUbGxobUHFeJocfMkM0GyEiJTUpG55yQ2spLSNNLVBZAigjmwTraWktAgMxemsxVEMeBAMQXGIdFocTGVhNFBJTlQ8AA/wzBOsAZQcsACgANAA1AAABLgIjIgcGIyImJic3HgIzMjc2MzIXNjYzMhYXByYmIyIGFRQXFhcTMhYVFAYjIiY1NDYD/qQYMUM1GxsaG1BxXiaHHzJDNBshIiU1KRueckNrKS0jTS1QWQIoI4EpPT0pKT098wTraWktAgMxemsxVEMeBAMQXGIdFocTGVhNFBJTlQFGOysrOzsrKzv+yQAAAAAD/LoE6//7BywAEAAgACEAAAEuAiMiBgcnNjYzMh4CFxMyFhYVFAYGIyImJjU0NjYD/qUsU2RII0ArMi1ZNFeBaV4tSh4zICEzHR4zIB80yATrqLNPCxGVEA47gd6nAh0fOCAhOB4fNyEgOB/98gAAAvy6BOsAaAcsACEAIgAAAS4DIyIGByc2NjMyFhczNjMyFhcHJiYjIgYVFRQXFhcn/qUjQEVONSNAKzItWTRqfS8FNslEbCotI00tUFkBJCacBOuDpFsoCxGVEA5NTpsdFocTGVhNGAcFXowPAAAAAAP8ugTrAGgHLAAhAC0ALgAAAS4DIyIGByc2NjMyFhczNjMyFhcHJiYjIgYVFRQXFhcTMhYVFAYjIiY1NDYD/qUjQEVONSNAKzItWTRqfS8FNslEbCotI00tUFkBJCaDKT09KSk9PfYE64OkWygLEZUQDk1Omx0WhxMZWE0YBwVejAFGOysrOzsrKzv+yQAAAAP8kwTr//sHLAAiADIAMwAAAS4DIyIHJzY2MzIeAxc3LgIjIgYHJzY2MzIeAhcTMhYWFRQGBiMiJiY1NDY2A/6SLUg/PTFJXjYvZj0pSEI1IhIJK05YQSNAKzAxUzZXgWldLkoeMyAhMx0eMyAfNMgE60BIIQwsjhMaESQsJR8DbXMxCxGNEA07gNurAhMfOCAhOB4fNyEgOB/9/AAAAAAC/JME6wBoBywAMQAyAAABLgMjIgcnNjYzMh4DFzcuAiMiBgcnNjYzMhYXMzYzMhYXByYmIyIGFRQXFhcn/pItSD89MUleNi9mPSlIQjUiEgkrTlhBI0ArMDFTNmh/LwU2yURsKi0jTS1QWQMjJZwE60BIIQwsjhMaESQsJR8DbXMxCxGNEA1OTZsdFocTGVhNFRVbiQ8AAAP8kwTrAGgHLAAxAD0APgAAAS4DIyIHJzY2MzIeAxc3LgIjIgYHJzY2MzIWFzM2MzIWFwcmJiMiBhUUFxYXEzIWFRQGIyImNTQ2A/6SLUg/PTFJXjYvZj0pSEI1IhIJK05YQSNAKzAxUzZofy8FNslEbCotI00tUFkDIyWDKT09KSk9PfYE60BIIQwsjhMaESQsJR8DbXMxCxGNEA1OTZsdFocTGVhNFRVbiQFGOysrOzsrKzv+yf///e4AAAIwBywCJgmpAAABBwsEAhIAAAAAAAD///3aAAACcAcsAiYJqQAAAQcLBQISAAAAAAAA///92gAAAnAHLAImCakAAAEHCwYCEgAAAAAAAP///kUAAAIwBywCJgmpAAABBwsHAhIAAAAAAAD///5FAAACdwcsAiYJqQAAAQcLCAISAAAAAAAA///+RQAAAncHLAImCakAAAEHCwkCEgAAAAAAAP///swAAAIwBywCJgmpAAABBwsKAhIAAAAAAAD///7MAAACegcsAiYJqQAAAQcLCwISAAAAAAAA///+zAAAAnoHLAImCakAAAEHCwwCEgAAAAAAAP///qUAAAIwBywCJgmpAAABBwsNAhIAAAAAAAD///6lAAACegcsAiYJqQAAAQcLDgISAAAAAAAA///+pQAAAnoHLAImCakAAAEHCw8CEgAAAAAAAAAD/hUE6wArBywAFAAgACEAAAEmJjU0NjYzMhYXByYmIyIGFRQWFxMyFhUUBiMiJjU0NgP+ejE0Uo9bRGwqLSNNLVBZMTlkKT09KSk9PbkE61CbQlh+Ph0WhxMZWE07f1QBRjsrKzs7Kys7/skAAAD//wA8AAAGOwcsAiYJcgAAAQcLBwYdAAAAAAAA//8AAP8fA/UHLAImCXQAAAEHCxwDygAAAAAAAP//AAD+iASLBywCJgl8AAABBwsEBG0AAAAAAAD//wAA/ogEiwcsAiYJfAAAAQcLBwSAAAAAAAAA//8AAP6IBIsHLAImCXwAAAEHCwoEgAAAAAAAAP//ADwAAAhNBywCJglzAAABBwsECC8AAAAAAAD//wA8AAAITQcsAiYJcwAAAQcLBwgvAAAAAAAA//8APAAACE0HLAImCXMAAAEHCwoILwAAAAAAAP//ADwAAAhNBywCJglzAAABBwsNCC8AAA
AAAAD//wA8AAAGOwcsAiYJcgAAAQcLBAYiAAAAAAAA//8APP84BjsHLAImCXIAAAAnCwcGHQAAAQcJpwR/AMIAAAAA//8AAP5jA/UHLAImCXQAAAAnCxwDygAAAQcJpwPR/+0AAAAA//8AAP6IBIsHLAImCXwAAAAnCwQEbQAAAQcJpwOfAIQAAAAA//8AAP6IBIsHLAImCXwAAAAnCwcEgAAAAQcJpwOfAIQAAAAA//8AAP6IBIsHLAImCXwAAAAnCacDnwCEAQcLCgSAAAAAAAAA//8APP84CE0HLAImCXMAAAAnCwQILwAAAQcJpwR/AMIAAAAA//8APP84CE0HLAImCXMAAAAnCwcILwAAAQcJpwR/AMIAAAAA//8APP84CE0HLAImCXMAAAAnCacEfwDCAQcLCggvAAAAAAAA//8APP84CE0HLAImCXMAAAAnCacEfwDCAQcLDQgvAAAAAAAA//8APP84BjsHLAImCXIAAAAnCwQGIgAAAQcJpwR/AMIAAAAAAAIAAP/nBxsE+gAyADMAAAEVIRE2NjMyFhYVFAYHJzY2NTQmIyIGBxEjESMiDgIVFBYWFwcuAjU0NjcFNSERITUhBxv9ZTN2SVeITkVAlDRAUEg3cjml7l5nRR1AfmlukYRJLCX+2wOO/CUD2wT6kv5iMjZLlmdo1V5VQq1TXl9MSf3yAqAWMUMwQXyDVGt9m5xVMl4hA5QBNpIAAAQAAAAACgYE+gBDAFQAZABlAAABJiY1ND4CMzM1ITUhFSMRIzUGIyImJic2NjU0LgIjISIGBhUUFhc2MzIWFhUUBgYjIiYmJzceAjMyNjU0JiMiBgEVITIWFhUUBgceAjMyNxEBMhYWFRQGBiMiJiY1NDY2EwKAfo4sVHFX2fxrCgbVpIGLbrZ/HH2AEytDTvzHQD8mQDVKZ2upYGG8g5X74WKNUbPFd4CAbl4xYAGRAceCkk1lcCBKWD6MdPxuHTEdHTEdHTEdHTHkAbAjm2M8VDsdr5KS+5jKQlm0hA9TQRonHQ0QLiMqQA4YRYhdW5BRZea+Qp/BVFZVRE4SAqavQXpHZnsoOUEaXwLv/ooeNR4eNR4eNR4eNR7+lQAAAAMAAAAACh4E+gBeAHEAcgAAAQcmJjU0Njc1ITUhFSMRIzUGBiMiJiYnNjY1NC4CIyMiJxYVFAYGBCMiJiY1NDY3JiY1NDY2MzIWFwcmJiMiBhUUFhc2MzIXByYjIgYVFBYzMiQ2NTQmJiMiBhUUFhMVFhchMhYWFRQGBxYWMzI2NxEBBEdDmqNzafxdCh7VpDyLVGWwgBt9gBMrQ05jXTQlY8L+7KKBtVkfIVtcVJdiKWgXDBlKK1pcTVI6Sy4oDRcdYGJ6cJsBC5QrUTc9Q3RvHSYBjoKSTWVwKXBXSYs8/TUCFoErpnNliRKPkpL7mNQkKFi3gg9TQRonHQ0ETWhz4LlqTYdZLFslLIZUUXQ6DQiNBw5CQjRHFBMGjwNJRk9RhOOEPmc7RDpHZgI4lgcSQXpHZnsoSUs6NALg/R8AAwAA/dwEJgT6AEIAQwBEAAABIyIOBBUUHgIzMjY3FwYHFSMiDgQVFB4CMzI2NxcGBiMiJiY1NDY2MzM1IyImJjU0NjYzMzUhNSEVIScDAveOQ19TPCMSL1V1R1elZDh4bI5DX1M8IxIvVXVHV6VkOGPOYJLgeXTcjxQIkuB5dNyPFP2uBCb+0aULA1IKGCYqNR86TjAVMDacORfpChgmKjUfOk4wFTA2nDExWKVxaZZPRlilcWmWT4WSkpL44gAABAAA/NoErAT6AEsATABNAE4AAAEjIgYGFRQWMzI2NxcGBxUjIgYGFRQWMzI2NxcGBxYWFwcmJiMiBhUUFjMyNxcGIyImNTQ2NyYmNTQ2MzM1IyIkNTQ2MzM1ITUhFSEnAzcC97R/hD2WqlWnZDhvdbR/hD2WqlWnZDiUgH7of3175oppaVlMXVw0bnSZrnZvq7b52SEI6f7++dkh/a4EJv7RpQ8BA2ArSi9ZUioymzEW4CtKL1lSKjKbPhEWnLFIn40/NjY5KI8qjXdXgRgYpoOOnj+tnY6eeJKSkvnRFQAEAAD93AR+BPoANgBDAEQARQAAASMiDgQVFB4CMzI2NxcGBxUeAhUUBCMiLgI1NDY2MzM1IyImJjU0NjYzMzUhNSEVIQMjIgYVFBYzMjY1NCYDAwL3jkNfUzwjEi9VdUdXpWQ4eGxonVP+/u1wvYhMe+SNBwiS4Hl03I8U/a4Efv55OT27w7ClqKt54AsDUgoYJio1HzpOMBUwNpw5F2cdaYpOo6wtXIlcY5hTRlilcWmWT4WSkvvoZ2dhamBdS3QEx/jiAAAAAAUAAPzaBKwE+gBDAFAAUQBSAFMAAAEGIyImNTQ2NyYmNTQkMzM1IyIkNTQ2MzM1ITUhFSERIyIGBhUUFjMyNjcXBgcVFhYVFAYHFhYXByYmIyIGFRQWMzI3AyMiBhUUFjMyNjU0JgMDNwMEbnSZrnpwsbUBDdURCOn+/vnZIf2uBH7+ebR/hD2WqlWnZDhvdaS0vLuB23h9e+aKaWlZTF1cEj3Bvaqrqap93A8B/QQqjXdbgBYaooSKoj+tnY6eeJKS/vgrSi9ZUioymzEWXSqpaIKVEhqep0ifjT82NjkoAxhVV1RUS1I/YgRl+dEVAAMAAAAACDoE+gA0AEUARgAAASMiBgYVFB4CMzI2NjcXBgYjIiYmNTQ2NjMzESE1IRUjESM1BgYjIiYmJzY2NTQuAiMhJTIWFhUUBgcWFjMyNjcRIRUTAvdwhKdZNFh0QD5rY1Q4Y85gi+R8geSGCP2uCDrVpDyLVGWwgBt9gBMrQ07+0wE+gpJNZXApcFdJizz8Nv8Cy0J8VUttRiITJSuZMTFyzoJ7u2QBDJKS+5jUJChYt4IPU0EaJx0NkkF6R2Z7KElLOjQC4K/9zgAAAAAFAAD93ATMBPoAKgA3AEQARQBGAAABIRUeAhUUBgcVHgIVFAQjIi4CNTQ2NjMzNS4DNTQ2NjMzNSE1IQEjIgYVFBYzMjY1NCYDIyIGFRQWMzI2NTQmAwMEzP4raJ1Tr6lonVP+/u1wvYhMe+SNB263hEp75I0H/a4EzP3yPbvDsKWoq3l0PbvDsKWoq3ngCwRolB1pik6FpxlfHWmKTqOsLVyJXGOYU0YBLlyIW2OYU4WS/lhnZ2FqYF1LdP0bZ2dhamBdS3QEx/jiAAAEAAAAAAklBPoAKwA8AEwATQAAASInFRYWFRQGBiMiJiY1NDY2MzMRITUhFSMRIzUGBiMiJiYnNjY1NC4CIwEhFSEyFhYVFAYHFhYzMjY3ASMiBgYVFB4CMzI2NTQmAQPydCN5e3vhk5HugoDmhgf9rgkl1aQ8i1RlsIAbfYATK0NOAp37SwIpgpJNZXApcFdJizz7Ekhvpl40W31JobJ3Aa0DFgcCScR4g7hbcc2EebxlAQySkvuYwyQoWLeCD1NBGicdDQFSwEF6R2Z7KElLOjQBVD5+WkttSCOHgGOd/t0AAAAGAAD93AUjBPoATwBcA
F0AXgBfAGAAAAEmJjU0NjYzMzUhNSEVIxEhIg4CFRQWFzYzMhYWFRQGBgcVIyIGFRQWFyY1NDY2MzIWFhUUBgYjIiYmNTQ2NyYkJzceAjMyNjU0JiMiBhMzMjY1NCYjIgYVFBYTARMnAoCAjEqJWPb8awUj6f59PTUjED41UGpupVpKlmeAxsx7eRlCjmdbgj5hxI6h+IX96sD+13SJUrTGeH+BamIxYDUWeXg9P1BXDcb+sr8hAh8jhlZDXC1+kpL+8AgTGQ8eMw4aO3FPRG9KC3pmYl1nDzo6N146O141UHdBVqVxj6gIGeTLQ42pSj8/MjgS/H1AOiYqOzcYKgY2+OIC7hoAAAAAAwAA/dwFIwT6AFgAWQBaAAAFJiY1NDY3JgM3HgIzIDU0JiMiBgcmJjU0NjYzMzUhNSEVIxEhIg4CFRQWFzYzMhYWFRQGBiMiJwYGFRQWFzYzMhYWFRQGBiMiJCc3HgIzIDU0JiMiBhMBAoCAjBsW05mJUrTGeAEAZ2UxYCmAjEqJWPb8awUj6f59PTUjED41UGpupVpfvYRiUCQkPjVQam6lWl+9hPD+pIeJUrTGeAEAZ2UxYOz+snEjhlYoPBZvAQ1DjalKiTQ/EhIjhlZDXC1+kpL+8AgTGQ8eMw4aPXZST35JEQgiFx4zDho9dlJPfknm6UONqUqJND8SBVn44gAAAAAEAAD82gVPBPoAZQBmAGcAaAAAJSYmNTQ3JiYnNxYEMzI2NTQmIyIHJiY1NDYzMzUhNSEVIxUhIgYGFRQXNjMyFhUUBiMiJwYVFBc2MzIWFRQGBxYWFwcmJiMiBhUUFjMyNxcGIyImNTQ2NyYkJzcWBDMyNjU0JiMiEwEnAoB/jSpks06JcQEOxYCAa2FlVX+NoYr2/GsFI+n+fUo4I3NRaazB2cdGQXNzUWmswZ6WeOB/fXvmimlpWUxdXDRudJmufHWr/uVwiXEBDsWAgGthZcD9fR8DIH1QQi0wq3pFrac4OCwyISB9UF1iapKS/AkaEjAfGIFxdIYIAjIwHxiBcWGCERiYsEifjT82NjkojyqNd1qCFxnKsUWtpzg4LDIE1vnLGwAAAAMAAAAACXAE+gBEAFUAVgAAASYmNTQ+AjMzNSE1IRUjESM1BgYjIiYmJzY2NTQuAiMhIgYGFRQWFzYzMhYWFRQGBiMiJiYnNx4CMzI2NTQmIyIGARUhMhYWFRQGBxYWMzI2NxEBAoB+jixUcVfZ/GsJcNWkPItUZbCAG32AEytDTv1dQD8mQDVKZ2upYGG8g5X74WKNUbPFd4CAbl4xYAGRATGCkk1lcClwV0mLPP01AbAjm2M8VDsdr5KS+5jUJChYt4IPU0EaJx0NEC4jKkAOGEWIXVuQUWXmvkKfwVRWVUROEgKmr0F6R2Z7KElLOjQC4P0fAAAABQAA/dwEngT6AEEATgBbAFwAXQAAJSMiBhUUFhcmNTQ2NjMyFhYVFAYGIyImJjU0NjY3NS4CNTQ2Njc1ITUhFSERIyIGFRQWFyY1NDY2MzIWFhUUBgcnMjY1NCYjIgYVFBYXEzI2NTQmIyIGFRQWFwMDAxWAxsx7eRlCjmdbgj5hxI6i+ISG75yg836G75z9kASe/neAxsx7eRlCjmdbgj6TiHF5eD0/UFcNDxZ5eD0/UFcNDx4pU2ZiXWcPOjo3Xjo7XjVQd0FWpXFrl00DRAFaom9rl00Dg5KS/u1mYl1nDzk7N146O141ZIMWfkA6Jio7NxkqFfz+QDomKjs3GCoWBkz44gAABAAAAAAI+AT6ADcASABVAFYAAAEhIg4CFRQWFyY1NDY2MzIeAhUUBgYjIiYmNTQ2Njc1ITUhFSMRIzUGBiMiJiYnNjY1NC4CJzIWFhUUBgcWFjMyNjcRIRUDNjY1NCYjIgYGFRQWJQTi/eJzl3s8gngfQYliTG5JI2vKh5b3iH/zn/2QCPjVpDyLVGWwgBt9gBMrQz2Ckk1lcClwV0mLPPuWf3aJRzkvSioRAjEDASBUdEWCoRtOXkZ5TS1KYTRek1F32IqAxHAE15KS+5iuJChYt4IPU0EaJx0NkkF6R2Z7KElLOjQDBtX8+gJiUT1GKEwyKUWwAAABAAD/5wWRBPoAHQAAARUjESMRIyIOAhUUFhYXBy4CNTQ2NwU1IREhNQWR1aXuXmdFHUB+aW6RhEksJf6fA8r76QT6kvuYAqAWMUMwQXyDVGt9m5xVMl4hA5QBNpIAAAACAAD/5wQXBPoAAwAZAAARNSEVAyIOAhUUFhYXBy4CNTQ2NwU1IRUD9cxeZ0UdQH5pbpGESSwl/p8DygRokpL+OBYxQzBBfINUa32bnFUyXiEDlJIAAAAABwAA/NoF1gT6ADsAWABZAFoAWwBcAF0AAAEDDgMjIiYmNTQ2NyYmNTQ2NjMyFz4CMzM1ITUhFSERIyIGBhUUFhYzMjcmJjU0NjMyFhYVFAYHEwEGIyImJicmIyIGFRQXNjMyFwcmIyIGFRQWMzIkExMDEREE5HQweZGsY3itWCEkTFJVlmIlLAaF15EK/BQF1v67Y6SmUUmXbwweBAVNPDlTKD0+rf67HiZ80YcYLCVeXIIzPCknDRQZVFpwXpIBCjfks/6tAUI+c1g1RnxQK1olJ3NMTXA5BneiScOSkv6tOm5USnFAAxIoDj9EMEkoO08W/jsBrQRHhl0HPztXJA8GiwNDPUNMqAUV+wb93AEi/dwABgAA/NoERgT6AD0APgA/AEAAQQBCAAABAwYjIicRFAYjIiYmNTQ2MzM1JiY1ND4EMzM1ITUhFSERIyIGBhUUFhYzMjcmJjU0NjMyFhYVFAYHEwETAxERA1SwKiFlUC8rLWdHODgxWFkZO1htilAK/aQERv67Y56mV0mXbwweBAVNPDlTKD0+rf565LP+rQHnBRX+ojA2SXMyMS61N6dtOV9bQysXw5KS/q02cFZKcUADEigOP0QwSSg7Txb+OwYU+wb93AEi/dwAAAAACgAA/NoFJwT6ADYAQQBLAEwATQBOAE8AUABRAFIAAAEDDgIjIiYmNTQ2NyY1ND4EMzM1ITUhFSERIyIGBhUUFhYzMjcmJjU0NjMyFhYVFAYHEwEGBwE2NjcGIyImEwEGFRQeAjMyARMDERETAQQ1dkeGr2lztGKOgSAZO1htilAK/MMFJ/67Y56mV0mXbwweBAVNPDlTKD0+rfz1KSABTi5cOCcedMtM/sYfJ0BVLjkBLOTMS/4I/q0BRlFjPlOTXXipIUdgOV9bQysXw5KS/q02cFZKcUADEigOP0QwSSg7Txb+OwIlBgz+1h5bTAVA/sgBGC47NEktFQVz+wb93AEi/dwCswFhAAAAAAcAAPzaBnEE+gBcAF0AXgBfAGAAYQBiAAABAwUXFhUUBiMiJiY1NDY3NycuAiMiBhUUFjMyNjcXBiMiJiY1NDY2MzIeAhcXJTY3LgI1ND4CMzM1ITUhFSERIyIGBhUUFhYzMjcmJjU0NjMyFhYVFAYHEwETAREREwV/mP4RGA04LDN4UCQsMkYdNz8zMkA0Lh07JyRPUUBp
O0J4QT5ZSUIfSQEcJCqEwmJRiLJoCvt5BnH+u2OkplFJl28MHgQFTTw5Uyg9Pq3+euT+sqP+rQGlyzofHSouNVEmFywSFKxGXCk0KyozDA+CHDlmP0RlOR08Zky0dA8NDWetdWaRWifDkpL+rTpuVEpxQAMSKA4/RDBJKDtPFv47BhT7Bv3cASL93AO1AAAACAAA/NoFEwT6ADYARgBHAEgASQBKAEsATAAAAQMOAiMiJiY1NDY3JjU0PgQzMzUhNSEVIREjIgYGFRQWFjMyNyYmNTQ2MzIWFhUUBgcTAQYGFRQWFjMyNjY3BiMiJgETAxERAQQhdkaEqWdxrmGDeiIZO1htilAK/NcFE/67Y56mV0mXbwweBAVNPDlTKD0+rfz+V2M/ZT1RjXRFJx5zxAE65FT97/6tAUZQZD5SlF11oyRLYjlfW0MrF8OSkv6tNnBWSnFAAxIoDj9EMEkoO08W/joCHhNmRUBUJEJqWwU9BC77Bv3cASL93AQUAAAABgAA/NoGcgT6AGYAZwBoAGkAagBrAAAlBiMiLgM1ND4CMzM1ITUhFSERIyIGBhUUFhYzMjcmJjU0NjMyFhYVFAYHEwcDDgMjIiYmNTQ2NyYmNTQ2NjMyFhYVFAYHJzY1NCYjIgYGFRQWFzYzMhcHJiMiBhUUFjMyJBMTAxERBMkmHmClfE8qUYiyaAr7eAZy/rtjpKZRSZdvDB4EBU08OVMoPT6tjnQweZGsY3itWBUXj5FHgVBEbT8oJHgpLiojPCOHgj1UKScNFBlUWnBekwELNeSHlAYqS2F8TWaRWifDkpL+rTpuVEpxQAMSKA4/RDBJKDtPFv47OQFCPnNYNUZ8UCNHISSYcEx8RzFbOS5bIU8jLSQsKEYpUFsIGwaLA0M9Q0yqBRP7Bv3cASL93AAAAAAGAAD82gi4BPoAUgBzAHQAdQB2AHcAACUGIyIuAzU0PgIzMzUhNSEVIxEjEQYGIyInDgMjIiYmNTQ2NyYmNTQ2NjMyFhYVFAYHJzY1NCYjIgYGFRQWFzYzMhcHJiMiBhUUFjMyJCUWFjMyNjcRIREjIgYGFRQWFjMyNyYmNTQ2MzIWFhUUBhMREQMEySYcYKZ8TytRiLJoCvt4CLjVpDJ1SbVzMHuZtmp4rVgVF4+RR4FQRG0/KCR4KS4qIzwjh4I9VCknDRQZVFpwXpMBCwEPJm9WQHw2/e5jpKZRSZdvDB4EBU08OVMoO9SRlAYqSmJ8TWaRWifDkpL6ZgENHCN2RH9jO0Z8UCNHISSYcEx8RzFbOS5bIU8jLSQsKEYpUFsIGwaLA0M9Q0yqxD1AMS4D2/6tOm5USnFAAxIoDj9EMEkoOk/9GgEi/dwB0wAAAAAGAAD82gRvBPoARABFAEYARwBIAEkAABMuAjU0PgIzMzUhNSEVIREhIgYGFRQWFzYzMhYXByYjIgYGFRQWMzI3JiY1NDYzMhYWFRQGBxYXByYmJwYjIiYmNTQBEwMREe43PCYyVW5T6P17BG/+u/5uQD8mQT5fdiZXFAxBO1d4PZifDx4HBU08OVMoMTg6VogjUxs7OYrXcwHmz8cB4iRCVDU/Vzgar5KS/r8QLiMrRxQoBwOVCjRbOmdqAxIoDj9EMEkoOEwWcHNEOZY8CVWca4EDdvsG/dwBIv3cAAAAAAIAAP/9BfkE+gAlADsAAAEiLgI1NDY2MzM1ITUhFSMRIzUhFRQGIyImJjU0NjMzNC4CIxMRISIOAxUUHgIzMzIeAhUhEQGKQWRbNU6ViHr9xgX51aX+vTYtMHZPQD07EyQ0IM7+1zk/JhkMEh4rI6NYY0QjAUkBzxY/YT1VazevkpL7mMdKPERVfTIrLSkuGAYCmv6/BxAZIhYbIxcKHkNgRgMPAAAAAAIAAP/pBZcE+gAbADkAAAEVIxEjNQYGIyImJjU0Ny4CNTQ+AjMzNSE1ATYzMhYXByYmIyIGFRQWMzI2NjcRIREhIgYGFRQWBZfVpH74jHKvXUo4OSIyVW5TjP3XAW1bcC96Gww1TyqAf3NpZ7CWUv6w/spAPyZABPqS+5jGd2ZLiFp7VCdCUTI/Vzgar5L9SCQNCJIICltWUFdNhmoCsv6/EC4jK0UAAAEAAP/pBIoE+gA5AAABESEiBgYVFBYXNjMyFhcHJiYjIgYVFBYzMjY2NxcOBSMiJiY1NDcuAjU0PgIzMzUhNSEVAs7+ykA/JkA6W3AvehsMNU8qgH9zaWewllJsQU5gXmt0QnKvXUo4OSIyVW5TjP3XBAcEaP6/EC4jK0UUJA0IkggKW1ZQV02GantLS0o1KBVLiFp7VCdCUTI/Vzgar5KSAAAAAwAAAAAEqgT6AB8AKwAsAAABIxEjEQUXFhUUBgYjIi4CNTQ2Nzc2Ny4CNREjNSEFERQeAzMyNjcRAQSq1aT+XzETHjgpLmRSJTEzcRtUU3EqogSq/JwOHy5GMUmTPf7nBGj7mAHr7FgiHxwtHSM9QB0hMR0+DygVd5V9ASmSkv7sYmA3KRRGPwHF/SUAAAAAAwAAAAAHfgT6ADAAPgA/AAABBgYjIiYnDgIjIiYmNTQ2NjMyFhcHJiMiBhUUFhYzMjY3Jic2NjU0JyE1IRUjESMBFhYzMjY3ESEWFhUUBgUGBUalWGGePUdzjldvtWRqwn8wgRsMckaBjUFpPmWnOx0MhHVL/EgHftWk/b8pf2JWnUT+aBwfc/7VAXAvM0hIQEQpWJ5jcqVVDQiVFXFlQ1wrV1dKWRxkVl1jkpL7mAJpZ2NJSwI1NG81bJdNAAAABQA5/NoFrwUPADgARQBGAEcASAAAASMRIzUGBiMiJiYnBycBFwcGBhUUHgIzMjY3ESYkJwYHJzY3JiY1NDY2MzIWFhUUBgcWFxEjNSEBNjY1NCYjIgYGFRQWARERBa/VpTR5TVWOVAHIXwIwREY2MRssNhxIezaN/tx1rsZiuIVdYkyVYViOUlFSqf6CAfz8TEZLUEgpRi1UAg0EaPqAoicmSItdi4gBUH0uI1Q2KjokEEBGAasMSTRzc4peT0KSXUd5SUN9UFCMQEMOAdaS/ioubz9CRiBBLj1s+owBIv3cAAAABwA5/NoEkAUPABgAJQA+AD8AQABBAEIAAAEmJCcGByc2NyYmNTQ2NjMyFhYVFAYHFhclNjY1NCYjIgYGFRQWAQ4CIyImJicHJwEXBwYGFRQeAjMyNjcDAxERBDWN/tx1rsZiuIVdYkyVYViOUlFSqf79xkZLUEgpRi1UAuQ9YXBHVY5UAchfAjBERjYxGyw2HEh7Nq6WAfsMSTRzc4peT0KSXUd5SUN9UFCMQEMOki5vP0JGIEEuPWz8hDo9IEiLXYuIAVB9LiNUNio6JBBARv6Y/vQBIv3cAAAAAAUAOfzaBa8FDwA8AEkASgBLAEwAAAEjESM1BgYjIiYmNTQ2NjMyFhcHJiYjIgYVFBYzMjY3ESYkJwYHJzY3JiY1NDY2MzIWFhUUBgcWFxEjNSEBNjY1NCYjIgYGFRQWARERBa/VpTuJSGOeXFuobytyGAwgUCdrdHBgQHw4jf7cda7
GYriFXWJMlWFYjlJRUqn+ggH8/ExGS1BIKUYtVAGpBGj6gJwjJE6SX2GOTA4HjQcOX1ZSWTIwAc8MSTRzc4peT0KSXUd5SUN9UFCMQEMOAdaS/ioubz9CRiBBLj1s+owBIv3cAAAHADn82gRxBQ8AGAAlAEMARABFAEYARwAAASYkJwYHJzY3JiY1NDY2MzIWFhUUBgcWFyU2NjU0JiMiBgYVFBYBDgMjIiYmNTQ2NjMyFhcHJiYjIgYVFBYzMjY3AwMREQQ1jf7cda7GYriFXWJMlWFYjlJRUqn+/cZGS1BIKUYtVALFOklRSipjnlxbqG8rchgMIFAna3RwYEB8OFONAfsMSTRzc4peT0KSXUd5SUN9UFCMQEMOki5vP0JGIEEuPWz8UiIhGApOkl9hjkwOB40HDl9WUlkyMP68/vQBIv3cAAIAOf/nCI0FDwBDAFAAAAEWFz4CMzIWFzY3ESE1IRUjESMRDgIHJzY3JiYjIgYVFB4EFwcuBCcmJwYHJzY3JiY1NDY2MzIWFhUUBic2NjU0JiMiBgYVFBYCjmiNFWN+P1aYR3O0/KIE19WkT2hQHp0ZJzxiMk9bCxovT35VbjSib0UeBMuarsZiuIVdYkyVYViOUlHlRktQSClGLVQC4ygfQl4sOTtoCgECkpL7mALXCD1/aDFgRS0kWkwcNjlFVG9HayuPfG9cLyxBc3OKXk9Ckl1HeUlDfVBQjAEubz9CRiBBLj1sAAADADkAAAXDBQ8ANQBCAEMAAAEjESMRBRcWFRQGBiMiLgI1NDY2Nzc2NyYmJwYHJzY3JiY1NDY2MzIWFhUUBgcWFhcRIzUhATY2NTQmIyIGBhUUFgEFw9Wl/oYxEx44KS5kUiUPLClxn6hm3VWuxmK4hV1iTJVhWI5SUVJY6HuWAhD8OEZLUEgpRi1UAp0EaPuYAdXWWCIfHC0dIz1AHRIgJhc+V00RPyZzc4peT0KSXUd5SUN9UFCMQCMpBQHWkv4qLm8/QkYgQS49bP6wAAAABAAAAAAEygT6ACIAJQAsAC0AAAERByMiDgIVFB4CMzI2NxcGBiMiJiY1NDcmJjURIzUhFSEBEQUUFhc2NwEBA/WEbmejcTwzV3A9W6llOGPOYJjfdHxAOKwEyvz3AY/+ADA1eJj+iwFaBGj+RYAUMFA7Ok0tEi83nDExU6Fyi1g5iFgBBpKS/nIBjtR1dh0kAwF4+9UAAP//AAD93ATKBPoCJgtWAAABBwoQBEQAAAAAAAAABgAAAAAE5wT6ABsAHgAmADUANgA3AAABEQcWFhUUDgIjIi4ENTQ3JiY1ESM1IRUhAREFFBYXNjMzAQEjIgYVFBYzMjY1NC4CEwMECFJWWEiCtW0yeHhWOCCBNzO/BOf87QGP/gAnL3CQH/6LAYRErKuilp2fKEBOWc0EaP5FSTaOVll9TyUTL0BNWzucVjWAVgEGkpL+cgGO1Gt0Hx0BeP4CaGdkaGBeMUs1IgLX+wYAAAD//wAA/dwE5wT6AiYLWAAAAQcKEARHAAAAAAAAAAYAAPzaBigE+gBAAE8AUABRAFIAUwAAARE0JiYjIxUUBgYjIi4CNTUGBhUUFhYXBy4CNTQ2NyYmNTQ+AjMhNSE1IRUhESEiBgYVFBYXNjMhMhYWFREBIxUUHgMzMj4DNRMDEREErhQvLis7dlpMbEAXbHJIloVuo6dPQkE2NTJVblMCTvvcBij+of0IQD8mMypShgHTbHw3/ivyCBIZJh4hKhgSBqYH/ugCfi8sEul5gkQzYndLywx9dFmfr3drlNLIcVWUNCxmPD9XOBqvkpL+vxAuIyc+FBk2cF/9hwLr0jRCLx0RERsvPTsD+fjiASL93AAFAAD82gWSBPoARQBGAEcASABJAAABESMVFAYjIiYmNTQ2MyE1NCYmIyEiDgIVFBYWFwcuAjU0NjcmJjU0PgIzITUhNSEVIREhIgYGFRQWFzYzITIWFhURARMREQQY2DQqLGdDNjkBnRQvLv7MZHpUJz+erm63ulBCQTY1MlVuUwG4/HIFkv6h/Z5APyYzKlKGAT1sfDf+0Sv+6AGdYzQzTXIwLy9eLywSGERjRE2Mr5Zrm9jBa1WUNCxmPD9XOBqvkpL+vxAuIyc+FBk2cF/9hwYS+OIBIv3cAAAABQAA/NoGhQT6ADAAQQBCAEMARAAAEyYmNTQ+AjMhNSE1IRUjESMRIRUUBiMiJiY1NDYzMyYmIyIGFRQWFhcHLgI1NDYBESEiBgYVFBYXNjMyFhchEQMREfw6NDJVblMBDv0cBoXVpf69Ni0wdFFAPTsKdmyKmD+erm63ulBIAsv+SEA/JjYsVWKtwQ8BSe4CADBmOz9XOBqvkpL6gAGbSjxEU38yKy1kWoR/TYyvlmub2MFrW44Cmf6/EC4jKEATG6ipA1P5dAEi/dwABgAA/NoGaQT6ACwASABJAEoASwBMAAATJiY1ND4CMyE1ITUhFSMRIzUGIyImJic2NjU0JiMiBhUUFhYXBy4CNTQ2AREhESEiBgYVFBYXNjMyHgIVFAYGBxYWMzI2AxERAfs7MjJVblMBDv0cBmnVpHWQZ66AHX9+bGampD+erm63ulBEBDb+mf5IQD8mNixZdE+MZzw0Wkcrc1pFd5v+HAIBMWU6P1c4Gq+Skvpmyj1TrH4ORzU4OYGCTYyvlmub2MFrU5X+ewQf/r8QLiMoQBMbH0FhQkRdPBlCPi39wgEi/dwDbAAAAAUAAPzaBkYE+gBXAFgAWQBaAFsAAAERBgYHJzY3JiYjIgYVFBYWFwcuAzU0PgIzMhc2NzU0JiYjISIOAhUUFhYXBy4CNTQ2NyYmNTQ+AjMhNSE1IRUhESEiBgYVFBYXNjMhMhYWFREBAxERBMxCXB6FGjYfRio9Rzd0cFxIfVszLkxkNX9zQlEULy7+GGR6VCdIloVuo6dPQkE2NTJVblMCbPu+Bkb+ofzqQD8mMypShgHxbHw3/tEl/ugB1xmIZyxsUBARQjc4YmtPaDZucndAPFo7HkUvDyYvLBIYRGNEWZ+vd2uU0shxVZQ0LGY8P1c4Gq+Skv6/EC4jJz4UGTZwX/2HBhL44gEi/dwAAAAABQAA/NoGKAT6AFIAUwBUAFUAVgAAATQmJiMhIg4CFRQWFhcHLgI1NDY3JiY1ND4CMyE1ITUhFSERISIGBhUUFhc2MyEyFhYVESM1BgYjIiYmNTQ2NjMyFhcHJiYjIhUUFjMyNjcDAxERBK4ULy7+NmR6VCdIloVuo6dPQkE2NTJVblMCTvvcBij+of0IQD8mMypShgHTbHw3pUJ7QGKZU1WfaihrGAwaTCXSaV47dTSKBwFmLywSGERjRFmfr3drlNLIcVWUNCxmPD9XOBqvkpL+vxAuIyc+FBk2cF/9QbIhHkd/U1WBRQ0IiAYOlUVILy4E//jiASL93AAAAwAA/+cFtgT6ADkAOgA7AAABNjcRITUhFSERHgMVFAYGByc2NjU0JiMiBgcnNjcmJiMiBhUUHgQXBy4ENTQ2NjMyFgERAtZii/w9Bbb+sSlOPCUwUUeNWl
ZVTmaNK50ZJzxiMk9bCxovT35VbnJ9ZD0dVphXVpYBNQL1WxMBBZKS/vIMMU5sR1ahkl9nbcFbV16YljFgRS0kWkwcNjlFVG9Ha2F0dGplNV2LTDkBy/sGAAD//wAA/nYFtgT6AiYLYAAAAQcJpwVJAAAAAAAA//8AAP3cBbYE+gImC2AAAAEHChAFOQAAAAAAAP//AAD93AW2BPoCJgtgAAAAJwoQBTkAAAEHCacDUwCLAAAAAAAFADn/5wjMBQ8AUABdAF4AXwBgAAABNjcRITUhFSERHgMVFAYGByc2NjU0JiMiBgcnNjcmJiMiBhUUHgQXBy4EJyYnBgcnNjcmJjU0NjYzMhYWFRQGBxYXPgIzMhYFNjY1NCYjIgYGFRQWAREBBexii/zdBRb+sSlOPCUwUUeNWlZVTmaNK50ZJzxiMk9bCxovT35VbjSib0UeBMuarsZiuIVdYkyVYViOUlFSaI0VY34/Vpb8V0ZLUEgpRi1UBS38ZAL1WxMBBZKS/vIMMU5sR1ahkl9nbcFbV16YljFgRS0kWkwcNjlFVG9HayuPfG9cLyxBc3OKXk9Ckl1HeUlDfVBQjEAoH0JeLDkLLm8/QkYgQS49bAGq+wYCCgAAAAACAAD/6AV9BPoALQAuAAABIxEjESEVFhYVFAYHFhcHJicGIyImJjU0NjMyFhc2NjU0JiMiBgcnNjc1ITUhAQV91aX995SbiHxrVn59Zjk6M08sRj0vVjVpen5tSnxQNICH/qoFffuVBGj7mARooBavjne+OF9kXZtWCSE9KDlBLCkch1heaCAnkD0KnJL8aAACAAD/6ANpBPoAKQAqAAABIRUWFhUUBgcWFwcmJwYjIiYmNTQ2MzIWFzY2NTQmIyIGByc2NzUhNSEBA2n+kZSbiHxrVn59Zjk6M08sRj0vVjVpen5tSnxQNICH/qoDaf2pBGigFq+Od744X2Rdm1YJIT0oOUEsKRyHWF5oICeQPQqckvxoAAD//wAA/vAFfQT6AiYLZQAAAQcJpwOCAHoAAAAA//8AAP7wA2kE+gImC2YAAAEHCacDggB6AAAAAAABAAAAAAlqBywAHwAAASQkISAEFRQWFzMVIxEjESM1MyYmNTQ+AjMgDAIXCJz+3f1R/nn+4/7rJBni1aW2pBkeWKr1ngEeAegBmAFKgATrzt9xhDFXIZL7mARoki5hQ1aDWS5Ym9N7AAEAAAAABJEHLAAfAAABIxEjESM1MyYmNTQ+AjMyHgIXIyYmIyIGFRQWFzMCMNWltqkdHzRijFh2xKaMPqRs75RxfCUY4gRo+5gEaJI4cEBJelcwSZLYjtvTb2E/Yy0AAAEAAAAABPgHLAAfAAABIxEjESM1MyYmNTQ+AjMyHgIXIyYkIyIGFRQWFzMCMNWltqgcHzholF2A2ryfRaZ//umjfYslGeEEaPuYBGiSNWw/S31ZMU2T14rZ1XNkP18qAAEAAAAABaEHLAAgAAABIxEjESM1MyYmNTQ+AjMyHgIXIwAhIg4CFRQWFzMCMNWltqQZHj5xoWOR/+DAUav+xf6JR3JQKicX4QRo+5gEaJIvZD5Pg1wzUpbVhAGtIDpTND1dIwABAAAAAAYNBywAIQAAASYkIyIOAhUUFhczFSMRIxEjNTMmJjU0PgIzMgQWFhcFXq7+gdFPfFYuJhjh1aW2pBkeQXeqaaABGfTRVwTr1tcfO1U2PFojkvuYBGiSLmM+UYNdMlOY1IIAAAEAAAAABngHLAAhAAABJiQjIg4CFRQWFzMVIxEjESM1MyYmNTQ+AjMyBAQWFwXGvf5b6FaHXTElGOLVpbakGR5EfrNwrwEzAQjgXATr1dgfO1Y3OlsikvuYBGiSLmM/UYNcMlSX1YEAAQAAAAAG5AcsAB8AAAEmJCMiBhUUFhczFSMRIxEjNTMmJjU0PgIzMgQEFhcGLcv+Nv++yiUY4tWltqQZHkaEvXe/AU0BHe9hBOvU2XdyOVoikvuYBGiSLmM/UoNcMVSY1YAAAAEAAAAAB1AHLAAfAAABJiQhIgYVFBYXMxUjESMRIzUzJiY1ND4CMzIEBBYXBpXa/hD+6s7WJRji1aW2pBkeSorGfc8BZwEy/mYE69PadnU4WSKS+5gEaJIuYz9ShFsxVZnUfwABAAAAAAe7BywAHwAAASYkISIGFRQWFzMVIxEjESM1MyYmNTQ+AjMyDAIXBv3o/ej+1N7iJBni1aW2pBkeTZDQg98BgAFHAQ1rBOvR3HV4NlkikvuYBGiSLmJBU4NbMFaZ1H4AAQAAAAAIJwcsAB8AAAEmJCEiBhUUFhczFSMRIxEjNTMmJjU0PgIzMgwCFwdl+P3F/rzu7yQZ4tWltqQZHlCW2YrvAZsBWgEccQTr0ttzfDVYIpL7mARoki5iQVSDWy9WmtN+AAEAAAAACJMHLAAfAAABJCQhIgYVFBYXMxUjESMRIzUzJiY1ND4CMzIMAhcHzP77/Z3+pv38JBni1aW2pBkeUp3jkf4BtAFwASt2BOvQ3XN+M1gikvuYBGiSLmFCVYNaL1ea030AAAAAAQAAAAAI/gcsAB8AAAEkJCEgBBUUFhczFSMRIxEjNTMmJjU0PgIzIAwCFwg0/uz9d/6Q/vP+9yQZ4tWltqQZHlWj7ZcBDgHPAYMBOnsE68/ecoEyWCGS+5gEaJIuYkJVg1ouWJrTfP//AAAAAAl6BywCJgtpAAABBwlvChMAWgAAAAD//wAAAAAFKAcsAiYLagAAAQcJbwXBAFoAAAAA//8AAAAABXgHLAImC2sAAAEHCW8GEQBaAAAAAP//AAAAAAYDBywCJgtsAAABBwlvBpwAWgAAAAD//wAAAAAGZQcsAiYLbQAAAQcJbwb+AFoAAAAA//8AAAAABtcHLAImC24AAAEHCW8HcABaAAAAAP//AAAAAAcnBywCJgtvAAABBwlvB8AAWgAAAAD//wAAAAAHegcsAiYLcAAAAQcJbwgTAFoAAAAA//8AAAAAB9YHLAImC3EAAAEHCW8IbwBaAAAAAP//AAAAAAhDBywCJgtyAAABBwlvCNwAWgAAAAD//wAAAAAIpQcsAiYLcwAAAQcJbwk+AFoAAAAA//8AAAAACRAHLAImC3QAAAEHCW8JqQBaAAAAAAABAAAAAApYBywAMwAAASQkISAEFRQWFzMVIxEjESM1MyYmNTQ+AjMyDAIXJjU0PgIzMhYXByYmIyIGFRQWFwic/t39Uf55/uP+6yQZ4tWltqQZHliq9Z7YAX8BUQEkfAgwVXNERGwqLSNNLVRVNzME687fcYQxVyGS+5gEaJIuYUNWg1kuM1yBTygjRGhFIx0WhxQYW0pEf0sAAAABAAAAAAWlBywALQAAASMRIxEjNTMmJjU0PgIzMhYXNjYzMhYXByYmIyIGFRQWFyMmJiMiBhUUFhczAjDVpbapHR80YoxYh9lbGp5xRGwqLSNNLVRVNzOvbO+UcXwlGOIEaPuYBGiSOHBASXpXMGBfX2AdFocUGFtKRH9L29NvYT9jLQABAAAAAAYKBywALwAAASMRIxEjNTMmJ
jU0PgIzMhYXPgMzMhYXByYmIyIGFRQWFyMmJCMiBhUUFhczAjDVpbaoHB84aJRdmvtqCjZQZzxEbCotI00tVFU3M69//umjfYslGeEEaPuYBGiSNWw/S31ZMW1pNVA2Gx0WhxQYW0pEf0vZ1XNkP18qAAAAAAEAAAAABq4HLAAwAAABIxEjESM1MyYmNTQ+AjMyBBc+AzMyFhcHJiYjIgYVFBYXIwAhIg4CFRQWFzMCMNWltqQZHj5xoWO8ATuGAzFTb0FEbCotI00tVFU3M6/+xf6JR3JQKicX4QRo+5gEaJIvZD5Pg1wzh3o/YUAhHRaHFBhbSkR/SwGtIDpTND1dIwAAAAEAAAAABxYHLAAxAAABJiQjIg4CFRQWFzMVIxEjESM1MyYmNTQ+AjMyBBc+AzMyFhcHJiYjIgYVFBYXBV6u/oHRT3xWLiYY4dWltqQZHkF3qmnUAWSTAS9UckNEbCotI00tVFU3MwTr1tcfO1U2PFojkvuYBGiSLmM+UYNdMpB/Q2VEIx0WhxQYW0pEf0sAAAEAAAAAB34HLAA0AAABJiQjIg4CFRQWFzMVIxEjESM1MyYmNTQ+AjMyHgIXNTQ+AjMyFhcHJiYjIgYVFBYXBca9/lvoVoddMSUY4tWltqQZHkR+s3B22cWyUDBVc0REbCotI00tVFU3MwTr1dgfO1Y3OlsikvuYBGiSLmM/UYNcMidJaEIGRGhFIx0WhxQYW0pEf0sAAAAAAQAAAAAH5gcsADIAAAEmJCMiBhUUFhczFSMRIxEjNTMmJjU0PgIzMh4CFzU0PgIzMhYXByYmIyIGFRQWFwYty/42/77KJRji1aW2pBkeRoS9d4Tv2cNWMFVzRERsKi0jTS1UVTczBOvU2XdyOVoikvuYBGiSLmM/UoNcMSlMbEQRRGhFIx0WhxQYW0pEf0sAAQAAAAAITgcsADQAAAEmJCEiBhUUFhczFSMRIxEjNTMmJjU0PgIzMgQWFhcmNDU0PgIzMhYXByYmIyIGFRQWFwaV2v4Q/urO1iUY4tWltqQZHkqKxn2RAQft010BMFVzRERsKi0jTS1UVTczBOvT2nZ1OFkikvuYBGiSLmM/UoRbMStPcUYIDQhEaEUjHRaHFBhbSkR/SwABAAAAAAi2BywANAAAASYkISIGFRQWFzMVIxEjESM1MyYmNTQ+AjMyBAQWFyY0NTQ+AjMyFhcHJiYjIgYVFBYXBv3o/ej+1N7iJBni1aW2pBkeTZDQg58BHwEA5GMCMFVzRERsKi0jTS1UVTczBOvR3HV4NlkikvuYBGiSLmJBU4NbMCxSdUgKEwpEaEUjHRaHFBhbSkR/SwAAAAABAAAAAAkeBywAMwAAASYkISIGFRQWFzMVIxEjESM1MyYmNTQ+AjMyBAQWFyY1ND4CMzIWFwcmJiMiBhUUFhcHZfj9xf687u8kGeLVpbakGR5QltmKrQE3ART0aQMwVXNERGwqLSNNLVRVNzME69Lbc3w1WCKS+5gEaJIuYkFUg1svLlV4ShgZRGhFIx0WhxQYW0pEf0sAAAEAAAAACYYHLAA0AAABJCQhIgYVFBYXMxUjESMRIzUzJiY1ND4CMzIMAhcmJjU0PgIzMhYXByYmIyIGFRQWFwfM/vv9nf6m/fwkGeLVpbakGR5SneORuwFOASkBA28CAjBVc0REbCotI00tVFU3MwTr0N1zfjNYIpL7mARoki5hQlWDWi8wV3tMDx0ORGhFIx0WhxQYW0pEf0sAAAEAAAAACe4HLAAzAAABJCQhIAQVFBYXMxUjESMRIzUzJiY1ND4CMzIMAhcmNTQ+AjMyFhcHJiYjIgYVFBYXCDT+7P13/pD+8/73JBni1aW2pBkeVaPtl8kBZgE9ARN2BjBVc0REbCotI00tVFU3MwTrz95ygTJYIZL7mARoki5iQlWDWi4xWn5NISFEaEUjHRaHFBhbSkR/SwAAAAIAAAAAClgHLAAzAEcAAAEkJCEgBBUUFhczFSMRIxEjNTMmJjU0PgIzMgwCFyY1ND4CMzIWFwcmJiMiBhUUFhcnND4CMzIeAhUUDgIjIi4CCJz+3f1R/nn+4/7rJBni1aW2pBkeWKr1ntgBfwFRASR8CDBVc0REbCotI00tVFU3MwIQHCUVFSUcEBAcJRUVJRwQBOvO33GEMVchkvuYBGiSLmFDVoNZLjNcgU8oI0RoRSMdFocUGFtKRH9L4BUmGxAQGyYVFSYbEBAbJgAAAAACAAAAAAWlBywALQBBAAABIxEjESM1MyYmNTQ+AjMyFhc2NjMyFhcHJiYjIgYVFBYXIyYmIyIGFRQWFzMlND4CMzIeAhUUDgIjIi4CAjDVpbapHR80YoxYh9lbGp5xRGwqLSNNLVRVNzOvbO+UcXwlGOICahAcJRUVJRwQEBwlFRUlHBAEaPuYBGiSOHBASXpXMGBfX2AdFocUGFtKRH9L29NvYT9jLdEVJhsQEBsmFRUmGxAQGyYAAgAAAAAGCgcsAC8AQwAAASMRIxEjNTMmJjU0PgIzMhYXPgMzMhYXByYmIyIGFRQWFyMmJCMiBhUUFhczJTQ+AjMyHgIVFA4CIyIuAgIw1aW2qBwfOGiUXZr7ago2UGc8RGwqLSNNLVRVNzOvf/7po32LJRnhAs8QHCUVFSUcEBAcJRUVJRwQBGj7mARokjVsP0t9WTFtaTVQNhsdFocUGFtKRH9L2dVzZD9fKtEVJhsQEBsmFRUmGxAQGyYAAAAAAgAAAAAGrgcsADAARAAAASMRIxEjNTMmJjU0PgIzMgQXPgMzMhYXByYmIyIGFRQWFyMAISIOAhUUFhczJTQ+AjMyHgIVFA4CIyIuAgIw1aW2pBkePnGhY7wBO4YDMVNvQURsKi0jTS1UVTczr/7F/olHclAqJxfhA3MQHCUVFSUcEBAcJRUVJRwQBGj7mARoki9kPk+DXDOHej9hQCEdFocUGFtKRH9LAa0gOlM0PV0j0RUmGxAQGyYVFSYbEBAbJgAAAAIAAAAABxYHLAAxAEUAAAEmJCMiDgIVFBYXMxUjESMRIzUzJiY1ND4CMzIEFz4DMzIWFwcmJiMiBhUUFhcnND4CMzIeAhUUDgIjIi4CBV6u/oHRT3xWLiYY4dWltqQZHkF3qmnUAWSTAS9UckNEbCotI00tVFU3MwIQHCUVFSUcEBAcJRUVJRwQBOvW1x87VTY8WiOS+5gEaJIuYz5Rg10ykH9DZUQjHRaHFBhbSkR/S+AVJhsQEBsmFRUmGxAQGyYAAAACAAAAAAd+BywANABIAAABJiQjIg4CFRQWFzMVIxEjESM1MyYmNTQ+AjMyHgIXNTQ+AjMyFhcHJiYjIgYVFBYXJzQ+AjMyHgIVFA4CIyIuAgXGvf5b6FaHXTElGOLVpbakGR5EfrNwdtnFslAwVXNERGwqLSNNLVRVNzMCEBwlFRUlHBAQHCUVFSUcEATr1dgfO1Y3OlsikvuYBGiSLmM/UYNcMidJaEIGRGhFIx0WhxQYW0pEf0vgFSYbEBAbJhUVJhsQEBsmAAIAAAAAB+YHLAAyAEYAAAEm
JCMiBhUUFhczFSMRIxEjNTMmJjU0PgIzMh4CFzU0PgIzMhYXByYmIyIGFRQWFyc0PgIzMh4CFRQOAiMiLgIGLcv+Nv++yiUY4tWltqQZHkaEvXeE79nDVjBVc0REbCotI00tVFU3MwIQHCUVFSUcEBAcJRUVJRwQBOvU2XdyOVoikvuYBGiSLmM/UoNcMSlMbEQRRGhFIx0WhxQYW0pEf0vgFSYbEBAbJhUVJhsQEBsmAAACAAAAAAhOBywANABIAAABJiQhIgYVFBYXMxUjESMRIzUzJiY1ND4CMzIEFhYXJjQ1ND4CMzIWFwcmJiMiBhUUFhcnND4CMzIeAhUUDgIjIi4CBpXa/hD+6s7WJRji1aW2pBkeSorGfZEBB+3TXQEwVXNERGwqLSNNLVRVNzMCEBwlFRUlHBAQHCUVFSUcEATr09p2dThZIpL7mARoki5jP1KEWzErT3FGCA0IRGhFIx0WhxQYW0pEf0vgFSYbEBAbJhUVJhsQEBsmAAACAAAAAAi2BywANABIAAABJiQhIgYVFBYXMxUjESMRIzUzJiY1ND4CMzIEBBYXJjQ1ND4CMzIWFwcmJiMiBhUUFhcnND4CMzIeAhUUDgIjIi4CBv3o/ej+1N7iJBni1aW2pBkeTZDQg58BHwEA5GMCMFVzRERsKi0jTS1UVTczAhAcJRUVJRwQEBwlFRUlHBAE69HcdXg2WSKS+5gEaJIuYkFTg1swLFJ1SAoTCkRoRSMdFocUGFtKRH9L4BUmGxAQGyYVFSYbEBAbJgACAAAAAAkeBywAMwBHAAABJiQhIgYVFBYXMxUjESMRIzUzJiY1ND4CMzIEBBYXJjU0PgIzMhYXByYmIyIGFRQWFyc0PgIzMh4CFRQOAiMiLgIHZfj9xf687u8kGeLVpbakGR5QltmKrQE3ART0aQMwVXNERGwqLSNNLVRVNzMCEBwlFRUlHBAQHCUVFSUcEATr0ttzfDVYIpL7mARoki5iQVSDWy8uVXhKGBlEaEUjHRaHFBhbSkR/S+AVJhsQEBsmFRUmGxAQGyYAAAACAAAAAAmGBywANABIAAABJCQhIgYVFBYXMxUjESMRIzUzJiY1ND4CMzIMAhcmJjU0PgIzMhYXByYmIyIGFRQWFyc0PgIzMh4CFRQOAiMiLgIHzP77/Z3+pv38JBni1aW2pBkeUp3jkbsBTgEpAQNvAgIwVXNERGwqLSNNLVRVNzMCEBwlFRUlHBAQHCUVFSUcEATr0N1zfjNYIpL7mARoki5hQlWDWi8wV3tMDx0ORGhFIx0WhxQYW0pEf0vgFSYbEBAbJhUVJhsQEBsmAAAAAgAAAAAJ7gcsADMARwAAASQkISAEFRQWFzMVIxEjESM1MyYmNTQ+AjMyDAIXJjU0PgIzMhYXByYmIyIGFRQWFyc0PgIzMh4CFRQOAiMiLgIINP7s/Xf+kP7z/vckGeLVpbakGR5Vo+2XyQFmAT0BE3YGMFVzRERsKi0jTS1UVTczAhAcJRUVJRwQEBwlFRUlHBAE68/ecoEyWCGS+5gEaJIuYkJVg1ouMVp+TSEhRGhFIx0WhxQYW0pEf0vgFSYbEBAbJhUVJhsQEBsmAAAAAAH90AAAAjAHLAAhAAABIxEjESM1My4DIyIGFRQWFyMmJjU0PgIzMh4CFzMCMNWltrAnUFpnP2BlKB2jIyMvWYFSZKCDbDHhBGj7mARokmudZjJqW0RvN0J/RUV0VC5DitSRAAAB/WAAAAIwBywAIQAAASMRIxEjNTMuAyMiBhUUFhcjJiY1ND4CMzIeAhczAjDVpbatMWZve0ZtdSocpiEjM2CJVnC4moI64ARo+5gEaJJqnGcybl5EbDI/eURIeFUwRo3SjQAAAfx/AAACMAcsACIAAAEjESMRIzUzACEiDgIVFB4CFyMmJjU0PgIzMh4CFzMCMNWltqf+7f6rQmlKJw0UGg2qHiQ7bJpghunKrUvfBGj7mARokgGeHzlRMiI7My4UNm9DTX9bMk2R0IQAAAD///3QAAACMAcsAiYLmgAAAQcJbwKYAFoAAAAA///9YAAAAjAHLAImC5sAAAEHCW8CiwBaAAAAAP///H8AAAIwBywCJgucAAABBwlvAm8AWgAAAAAAAf3QAAACbwcsADIAAAEjESMRIzUzLgMjIgYVFBYXIyYmNTQ+AjMyFhc2NjMyFhcHJiYjIgYVFBYXFhYXMwIw1aW2sCdQWmc/YGUoHaMjIy9ZgVJrpEQjkmJEbCotI00tVFUCAhQoE+EEaPuYBGiSa51mMmpbRG83Qn9FRXRULkpNS0wdFocUGFtKDhsOLWM4AAAAAAH9YAAAAm8HLAAwAAABIxEjESM1My4DIyIGFRQWFyMmJjU0PgIzMhYXNjYzMhYXByYmIyIGFRQXFhczAjDVpbatMWZve0ZtdSocpiEjM2CJVoDIVB2dbURsKi0jTS1UVRscHeAEaPuYBGiSapxnMm5eRGwyP3lESHhVMFlbWlodFocUGFtKQUA7QwAAAfx/AAACbwcsADIAAAEjESMRIzUzACEiDgIVFB4CFyMmJjU0PgIzMgQXPgMzMhYXByYmIyIGFRQWFzMCMNWltqf+7f6rQmlKJw0UGg2qHiQ7bJpgrAEcdwY0Ums/RGwqLSNNLVRVMi3VBGj7mARokgGeHzlRMiI7My4UNm9DTX9bMnxzO1o8Hh0WhxQYW0pBeUUAAAL90AAAAm8HLAAyAEYAAAEjESMRIzUzLgMjIgYVFBYXIyYmNTQ+AjMyFhc2NjMyFhcHJiYjIgYVFBYXFhYXMyc0PgIzMh4CFRQOAiMiLgICMNWltrAnUFpnP2BlKB2jIyMvWYFSa6REI5JiRGwqLSNNLVRVAgIUKBPhzBAcJRUVJRwQEBwlFRUlHBAEaPuYBGiSa51mMmpbRG83Qn9FRXRULkpNS0wdFocUGFtKDhsOLWM40RUmGxAQGyYVFSYbEBAbJgAC/WAAAAJvBywAMABEAAABIxEjESM1My4DIyIGFRQWFyMmJjU0PgIzMhYXNjYzMhYXByYmIyIGFRQXFhczJzQ+AjMyHgIVFA4CIyIuAgIw1aW2rTFmb3tGbXUqHKYhIzNgiVaAyFQdnW1EbCotI00tVFUbHB3gzBAcJRUVJRwQEBwlFRUlHBAEaPuYBGiSapxnMm5eRGwyP3lESHhVMFlbWlodFocUGFtKQUA7Q9EVJhsQEBsmFRUmGxAQGyYAAAAC/H8AAAJvBywAMgBGAAABIxEjESM1MwAhIg4CFRQeAhcjJiY1ND4CMzIEFz4DMzIWFwcmJiMiBhUUFhczJzQ+AjMyHgIVFA4CIyIuAgIw1aW2p/7t/qtCaUonDRQaDaoeJDtsmmCsARx3BjRSaz9EbCotI00tVFUyLdXMEBwlFRUlHBAQHCUVFSUcEARo+5gEaJIBnh85UTIiOzMuFDZvQ01/WzJ8cztaPB4dFocUGFtKQXlF0RUmGxAQGyYVFSYbEBAbJgAAAAP7vP3cAC4AFgAcACwALQAABTY2MzIWFhUUBgYjIiYmJzceAjMyNjU0JiM
iBwUyFhYVFAYGIyImJjU0NjYl/kIvazNZgUVLkWNzvJlMfkh0gFFTXk5ESkX9tx4zICEzHR4zIB80ApUOEhJIg1RTgUdTlHFYbnc5UEk+RR0JHzggITgeHzchIDgfnAAAA/u8/dwALgAWABwALAAtAAAFNjYzMhYWFRQGBiMiJiYnNx4CMzI2NTQmIyIHBTIWFhUUBgYjIiYmNTQ2NiX+Qi9rM1mBRUuRY3O8mUx+SHSAUVNeTkRKRf23HjMgITMdHjMgHzQCEw4SEkiDVFOBR1OUcVhudzlQST5FHQkfOCAhOB4fNyEgOB+cAAAC/Nv93AAuABYAHAAdAAAFNjYzMhYWFRQGBiMiJiYnNx4CMzI2NTQmIyIHN/5CL2szWYFFS5Fjc7yZTH5IdIBRU15OREpFLg4SEkiDVFOBR1OUcVhudzlQST5FHZMAAAAD/AL93AEoABYAHwAvADAAAAMGIyImJjU0NjYzMh4CFwcuAiMiBhUUHgIzMjY3JTIWFhUUBgYjIiYmNTQ2NiWAcXFllE5VoGlhqI2KU31QkqdiZW0cLjwfLlQ3/SceMyAhMx0eMyAfNAJP/gsvSIRVTYFLMlqPflB4llBORCc1IQ4SGsofOCAhOB4fNyEgOB+cAAAAA/wC/dwBKAAWAB8ALwAwAAADBiMiJiY1NDY2MzIeAhcHLgIjIgYVFB4CMzI2NyUyFhYVFAYGIyImJjU0NjYlgHFxZZROVaBpYaiNilN9UJKnYmVtHC48Hy5UN/0nHjMgITMdHjMgHzQBzf4LL0iEVU2BSzJaj35QeJZQTkQnNSEOEhrKHzggITgeHzchIDgfnAAAAAP8ev3cADgAJAAXACcAKAAAEwYGIyImJjU0NjY3FwYGFRQeAjMyNjclMhYWFRQGBiMiJiY1NDY2JThIhURllE5RpXMPcm8cLjsgOV5R/OceMyAhMx0eMyAfNAHX/iIkIkiEVU2BUwaHDFdIIzIhDxkqtB84ICE4Hh83ISA4H5wAA/x6/dwAOAAkABcAJwAoAAATBgYjIiYmNTQ2NjcXBgYVFB4CMzI2NyUyFhYVFAYGIyImJjU0NjYlOEiFRGWUTlGlcw9ybxwuOyA5XlH85x4zICEzHR4zIB80AVX+IiQiSIRVTYFTBocMV0gjMiEPGSq0HzggITgeHzchIDgfnAAD/Hr82gCIACQAKAA4ADkAABMGBiMiJiY1NDcmJjU0NjY3FwYGFRQWMzI2NxcGBiMiJwYVFBYzMjY3ATIWFhUUBgYjIiYmNTQ2NiWISIVEapRJIDY6UqVyD29yVlA5XVIzSIVEJCEbVVE5XVL8lh4zICEzHR4zIB80Adf9ICQiRnlOOjIlbEFCcEgFhwlIODU5FiOBJCIFHyg2OBYjAcMfOCAhOB4fNyEgOB+cAAAAAAP8evzaAIgAJAAoADgAOQAAEwYGIyImJjU0NyYmNTQ2NjcXBgYVFBYzMjY3FwYGIyInBhUUFjMyNjcBMhYWFRQGBiMiJiY1NDY2JYhIhURqlEkgNjpSpXIPb3JWUDldUjNIhUQkIRtVUTldUvyWHjMgITMdHjMgHzQBVf0gJCJGeU46MiVsQUJwSAWHCUg4NTkWI4EkIgUfKDY4FiMBwx84ICE4Hh83ISA4H5wAAAAAA/xF/NoAXQABAE0ATgBPAAATBgYjIi4CNTQ+Ajc2NjU0JiMiDgIHJzY2NyYmIyIGFRQeAhcHLgM1ND4CMzIWFzY2MzIeAhUUBgcGBhUUHgIzMj4CNwEBXSpsQj1gQiMcOlxACwxBNhw5NjEUjgsaESJHLD9AJUdoQ19GeVozL0pbLEh3ODB3RyhXSC8hHXRiFSIsGBgpKCob/e0B6f0QFSEiPVUzKkk9Lg4WLxw7NxMzWkYlJUMdGR5ENitOUVo2Zjxwb3NAQFs7HC4sLC4aOFtAPG0vDUE0GCQWCwYMEw4CcP4qAAAAAAT7MPzaAI8AAQBNAF0AXgBfAAATBgYjIi4CNTQ+Ajc2NjU0JiMiDgIHJzY2NyYmIyIGFRQeAhcHLgM1ND4CMzIWFzY2MzIeAhUUBgcGBhUUHgIzMj4CNwEyFhYVFAYGIyImJjU0NjYlAY8qbEI9YEIjHDpcQAsMQTYcOTYxFI4LGhEiRyw/QCVHaENfRnlaMy9KWyxIdzgwd0coV0gvIR10YhUiLBgYKSgqG/s6HjMgITMdHjMgHzQDIQFn/RAVISI9VTMqST0uDhYvHDs3EzNaRiUlQx0ZHkQ2K05RWjZmPHBvc0BAWzscLiwsLho4W0A8bS8NQTQYJBYLBgwTDgHUHzggITgeHzchIDgfnP4qAAAAAAT7MPzaAI8AAQBNAF0AXgBfAAATBgYjIi4CNTQ+Ajc2NjU0JiMiDgIHJzY2NyYmIyIGFRQeAhcHLgM1ND4CMzIWFzY2MzIeAhUUBgcGBhUUHgIzMj4CNwEyFhYVFAYGIyImJjU0NjYlAY8qbEI9YEIjHDpcQAsMQTYcOTYxFI4LGhEiRyw/QCVHaENfRnlaMy9KWyxIdzgwd0coV0gvIR10YhUiLBgYKSgqG/s6HjMgITMdHjMgHzQCnwHp/RAVISI9VTMqST0uDhYvHDs3EzNaRiUlQx0ZHkQ2K05RWjZmPHBvc0BAWzscLiwsLho4W0A8bS8NQTQYJBYLBgwTDgHUHzggITgeHzchIDgfnP4qAAAAAAP7MPzaAVcAAQBgAHAAcQAAAQYGIyIuAjU0NyYmNTQ2NyYjIg4CByc2NjcmJiMiBhUUHgIXBy4DNTQ+AjMyFhc2NjMyHgIVFAYVBgYVFB4CMzI+AjcXBgYjIiYnBgYVFB4CMzI+AjcBMhYWFRQGBiMiJiY1NDY2JQFXKmxCPWBCIwcqLTlIHkkcOTYxFI4LGhEiRyw/QCVHaENfRnlaMy9KWyxIdzgwd0coV0gvAVdEFSIsGBgpKCobKCpsQgoTCgEBFSIsGBgpKCob+nIeMyAhMx0eMyAfNAMh/Q4UICA8UzMgGxtSMzdaIjYTM1pGJSVDHRkeRDYrTlFaNmY8cG9zQEBbOxwuLCwuGzxdQgMMAw83IxQcEggGDBMOfRQgAQEFCwUYJBYLBgwTDgHZHzggITgeHzchIDgfnAAC/EX82gElAAEAYABhAAABBgYjIi4CNTQ3JiY1NDY3JiMiDgIHJzY2NyYmIyIGFRQeAhcHLgM1ND4CMzIWFzY2MzIeAhUUBhUGBhUUHgIzMj4CNxcGBiMiJicGBhUUHgIzMj4CNwEBJSpsQj1gQiMHKi05SB5JHDk2MRSOCxoRIkcsP0AlR2hDX0Z5WjMvSlssSHc4MHdHKFdILwFXRBUiLBgYKSgqGygqbEIKEwoBARUiLBgYKSgqG/0l/Q4UICA8UzMgGxtSMzdaIjYTM1pGJSVDHRkeRDYrTlFaNmY8cG9zQEBbOxwuLCwuGzxdQgMMAw83IxQcEggGDBMOfRQgAQEFCwUYJBYLBgwTDgJ1AAP7MPzaAVcAAQBgAHAAcQAAAQYGIyIuAjU0NyYmNTQ2NyYjIg4CByc2NjcmJiMiBhUUHgIXBy4DNT
Q+AjMyFhc2NjMyHgIVFAYVBgYVFB4CMzI+AjcXBgYjIiYnBgYVFB4CMzI+AjcBMhYWFRQGBiMiJiY1NDY2JQFXKmxCPWBCIwcqLTlIHkkcOTYxFI4LGhEiRyw/QCVHaENfRnlaMy9KWyxIdzgwd0coV0gvAVdEFSIsGBgpKCobKCpsQgoTCgEBFSIsGBgpKCob+nIeMyAhMx0eMyAfNAKf/Q4UICA8UzMgGxtSMzdaIjYTM1pGJSVDHRkeRDYrTlFaNmY8cG9zQEBbOxwuLCwuGzxdQgMMAw83IxQcEggGDBMOfRQgAQEFCwUYJBYLBgwTDgHZHzggITgeHzchIDgfnAAD/WD93AE2AAAADwAfACAAAAU2MzIeAxcHLgIjIgcnMhYWFRQGBiMiJiY1NDY2N/6RLTVDeHBta0B1SIaXXDYg2R4zICEzHR4zIB808VsLGzVVemFUcYhDCjgfOCAhOB4fNyEgOB+6AAUAAAAABbIE+gAyAEIAQwBEAEUAAAEmJjU0NjYzMzUhNSEVIREhIg4CFRQWFzYzMhYWFRQGBiMiJCc3HgIzMjY1NCYjIgYBMhYWFRQGBiMiJiY1NDY2AQMBAoCAjEqJWPb8awWy/oj+fT01IxA+NVBqbqVaYL2D8P6kh4lStMZ4f4FqYjFgAj4dMR0dMR0dMR0dMf7LjwGcAh8jhlZDXC1+kpL+8AgTGQ8eMw4aO3FPTXlG5ulDjalKPz8yOBIBUB41Hh41Hh41Hh41HgF5+wYBJwAABAAAAAAFuQT6AEkASgBLAEwAAAEHJiY1NDY3NSE1IRUhFRYWFRQGBgQjIiYmNTQ2NyY1NDY2MzIWFwcmIyIGFRQWFzYzMhcHJiMiBhUUFjMyJDY1NCYmIyIGFRQWAwMBBEdDmqNzafxdBbn+j3V8ZcL+7KCBtFoZG6tUl2IpaBcMSEZbW05QOkwuKA0XKF1adnSbAQmWKlE4PkJ0NpoBmQKagSWPY1d1D12SkmMbmHJjvptZQXZNIkYfTZBGZDMNCIsTLjInMw4QBo8DNjM8OWeyaTFQLzEtN0wCTvsGAScAAAAABAAA/dwEJgT6ACMAJAAlACYAAAEjIg4EFRQeAjMyNjcXBgYjIiYmNTQ2NjMzNSE1IRUhJwMDAveOQ19TPCMSL1V1R1SkaTdjzmCS4Hl03I8U/a4EJv7RpQQHA1IKGCYqNR86TjAVLTaZMTFYpXFplk+FkpKS+wb93AAAAAQAAAAABMwE+gAXACQAJQAmAAABIRUeAhUUBCMiLgI1NDY2MzM1ITUhASMiBhUUFjMyNjU0JgMTBMz+K2idU/7+7XC9iEx75I0H/a4EzP3yPbvDsKWoq3ngUQRolB1pik6jrC1ciVxjmFOFkv5YZ2dhamBdS3QBxfsGAAAEAAAAAAUjBPoAMgAzADQANQAAASYmNTQ2NjMzNSE1IRUjESEiDgIVFBYXNjMyFhYVFAYGIyIkJzceAjMyNjU0JiMiBhMDAQKAgIxKiVj2/GsFI+n+fT01IxA+NVBqbqVaYL2D8P6kh4lStMZ4f4FqYjFg7I/+GAIfI4ZWQ1wtfpKS/vAIExkPHjMOGjtxT015RubpQ42pSj8/MjgSAsn7BgEnAAQAAAAABJ4E+gAjADAAMQAyAAABIyIGFRQWFyY1NDY2MzIWFhUUBgYjIiYmNTQ2Njc1ITUhFSEDMjY1NCYjIgYVFBYXAxMDFYDGzHt5GUKOZ1uCPmHEjqL4hIbvnP2QBJ7+d3F5eD0/UFcNDx4gA1VmYl1nDzk7N146O141UHdBVqVxa5dNA4OSkv1IQDomKjs3GSoVA0r7BgAAAAYAAAAABjME+gAhADEAQgBDAEQARQAAARYWFRQGBiMiJicGBiMiJiY1NDY2MzIWFzY2NzUhNSEVIQEWFjMyPgI1NCYmIyIGBycmJiMiDgIVFB4CMzI2NwETAQTAdIJYpGxfo142kWxhnl5bpWhdo18ydFH75AYz/o3+ekR6QStOPCM0VTVKbTSPTXQ9K048Ix8zRCZJbTYBcRD+vgPAH698YJlXP05FSE2dbGOaVD9NP0IInpKS/cM8NxYwSTREVyVVZEVCMhYwSTQ0SS4VVGUCifsGAScAAAMAAADZBbYE+gAzADQANQAAATY3NSE1IRUhFRYWFRQGByc2NTQmIyIOAgcnNjcmJiMiBgYVFBYWFwcuAjU0NjYzMhYBAwLRYJL8PQW2/rFncUdGjXVYSyZPSkQbnRUkNmEyNk0nNYCGW4+rS1CPT1+dATxNA0tWE7SSkrwgoHJstlxniZVQWRY6Z1ItTT0oIyQ9JTRaaFR6UpmUU1J5QzQBdfwtAAAA//8AAP/oBbIE+gImC7YAAAEHCacDsQFyAAAAAP//AAD/mwW5BPoCJgu3AAABBwmnA48BJQAAAAD//wAA/dwEJgT6AiYLuAAAAQcJpwLmAekAAAAA//8AAAAABMwE+gImC7kAAAEHCacDKgG/AAAAAP//AAAAAAUjBPoCJgu6AAABBwmnA00BrgAAAAD//wAAAAAEngT6AiYLuwAAAQcJpwMRAdcAAAAA//8AAP/NBjME+gImC7wAAAEHCacEBQFXAAAAAP//AAD/tgW2BPoCJgu9AAABBwmnA5cBQAAAAAAAAgA5AZ4EGgUPABkAJgAAASYkJwYHJzY3JiY1NDY2MzIWFhUUBgcWFhclNjY1NCYjIgYGFRQWA/ln/tBTrsZiuIVdYkyVYViOUlFSS9xl/eFGS1BIKUYtVAH7D1cjc3OKXk9Ckl1HeUlDfVBQjEAdLgiULm8/QkYgQS49bAAAAAAF/A/93P+DBPoABgAjACQAJQAmAAAlAScBMxMHBSc2MzIWFhUUBgYjIiYmJzceAjMyNjU0JiMiBjcTE/5X/oVgAcVS8Hn+qC1kaVmGRkyUY3O+n1VqUoKKVltYS0MsT30YBHr++4YBFf7ZT5GJLT5uSENtPz1sV2hTXC45MDA0GOQBJwPTAAAAA/wP/Nr/gwAOAAYAIwAkAAAFAScBMxMHBSc2MzIWFhUUBgYjIiYmJzceAjMyNjU0JiMiBhP+V/6FYAHFUvB5/qgtZGlZhkZMlGNzvp9ValKCilZbWEtDLE99iP77hgEV/tlPkYktPm5IQ20/PWxXaFNcLjkwMDQYAeYA///7UPza/4MADgAmC8gAAAEHCaf+BAA3AAAAAAAF/Hz93ABvBPoABgAlACYAJwAoAAAlAScBMxMHAxcGIyImJjU0NjYzMh4DFwcuAiMiBhUUFjMyNgMTE/5X/oVgAcVS8Hk9LGRoW4ZFSoxdQHJraWlGfUV+jFJaWUtDLk5UGAR6/vuGARX+2U/++IktP25IQm0/GC9MbV1QZnxAOTAwNBgBgQEnA9MAAAP8fPzaAG8ADgAGACUAJgAABQEnATMTBwMXBiMiJiY1NDY2MzIeAxcHLgIjIgYVFBYzMjYD/lf+hWABxVLweT0sZGhbhkVKjF1AcmtpaUZ9RX6MUlpZS0MuTlSI/vuGARX+2U/++IktP25IQm0/GC9MbV1QZnxAOTAwNBgCgwD///tQ/
NoAbwAOACYLywAAAQcJp/4EADcAAAAAAAX8Rv3c/74E+gAGABwAHQAeAB8AACUBJwEzAQcTBgYjIiYmNTQ2NjcXBgYVFBYzMjY3ARMT/lf+T2AB+1IBK3lLSIVEaZRKUaZyD3NuU0tBZ0j+9xgEev7WhgE6/o9P/tIkIjtsR0J1TgaHCT05MDIbJAFUAScD0wAAAAAD/Eb82v++AA4ABgAcAB0AAAUBJwEzAQcTBgYjIiYmNTQ2NjcXBgYVFBYzMjY3Af5X/k9gAftSASt5S0iFRGmUSlGmcg9zblNLQWdI/veI/taGATr+j0/+0iQiO2xHQnVOBocJPTkwMhskAlYAAAD///tQ/Nr/vgAOACYLzgAAAQcJp/4EADcAAAAAAAX8fPza/9QE+gAGAC8AMAAxADIAACUBJwEzEwcTFwYGIyImJjU0NyYmNTQ2NjcXBgYVFBYzMjY3FwYGIyInBhUUFjMyNgMTE/5X/oVgAcVS8HmXM0iFRGqTSh82OVGkdA9xcFJUOV1SM0iFRCcjFlJUOV37GAR6/vuGARX+2U/+B4ElIT1tRjAxImI8PGdCBYcIODEsLxYjgSUhBRcfLywWAoIBJwPTAAX8RfzaAF0E+gAGAEYARwBIAEkAACUFJyUzEwcBFwYGIyImJjU0Njc2NTQmIyIGByc2NyYmIyIGFRQeBBcHLgI1NDY2MzIWFzYzMhYWFRQHBgYVFBYzMjYBExP+V/68YAGOUvB5ASsoKmtDUHY8eXkXPzhKZiCOGB4iRyw8QwkVJD1sLF+Bg0hDeEVHeDhijEB2QD5pbTc1KlX+XRgEeuCG8P7ZT/3vexUfNFs6TmgYJCUqLF1bJT4lFBc1LBEhIigzSRxmXHh5Q0BdNC4sWjNfQGNcAi8uISMSApEBJwPTAAAAAAX8RfzaASUE+gAGAFgAWQBaAFsAACUFJyUzEwcBFwYGIyImJjU0NyYmNTQ2NyYmIyIGByc2NyYmIyIGFRQeBBcHLgI1NDY2MzIWFzYzMh4CFRUGFQYVFBYzMjY3FwYGIyInBhUUFjMyNgETE/5X/rxgAY5S8HkB9Cc7aDVQdjwGKys+OhYtG0pmII4YHiJHLDxDCRUkPWwsX4GDSEN4RUd4OGKMKVdILgGbOzIwTz4nO2g1ExQCPjQ1UP2JGAR64Ibw/tlP/e15HBg0XT4gEhdJLC5LFRALXVslPiUUFzUsESEiKDNJHGZceHlDQF00LixaGjZROAgGAwk7GRwTHXocGAIICSInEwKVAScD0wAABfxG/dwAPAT6AAYAFgAXABgAGQAAJQEnATMBByU2MzIeAxcHLgIjIgcbAv5X/k9gAftSASt5/lItNUd6b2tdS3VJhZdcOB6kGAR6/taGATr+j08TCxsxS1ZRVFprNQcBMQEnA9MAAAAE/Eb82gA8AAAABgAWABcAGAAABQEnATMBByU2MzIeAxcHLgIjIgcTAf5X/k9gAftSASt5/lItNUd6b2tdS3VJhZdcOB6k/tuu/taGATr+j085CxsxS1ZRVFprNQcCM/3cAAD///tQ/NoAPAAAACYL1AAAAQcJp/4EADcAAAAAAAP8XPzaAC4AAAAeAB8AIAAAATY2MzIWFhUUBgYjIiYmJzceAjMyNjU0JiYjIgYHEwP+BTZ0OmWTTVemcoDYtVZ+VY6eZGxsLksrL1czar3+5RcYSIFWU4FHUpZwV2t4Ok5LKzsdExgBo/7+AAP9V/zaASgAAAAfACAAIQAAAwYjIiYmNTQ2NjMyHgIXBy4CIyIGFRQeAjMyNjcDAYBxcWWUTlWgaWGojYpTfVCSp2JlbRwuPB8uVDeo/un9CS9IhFVNgUsyWo9+UHiWUE5EJzUhDhIaAmj+/gAAAAAD/eD82gA4AAAAFwAYABkAABMGBiMiJiY1NDY2NxcGBhUUHgIzMjY3ARM4SIVEZZROUaVzD3JvHC47IDleUf6gyf0gJCJIhFVNgVMGhwxXSCMyIQ8ZKgJS/v4AAAL94P3cAIgAIQAoACkAABMGBiMiJiY1NDcmJjU0NjY3FwYGFRQWMzI2NxcGBiMiJwYVFBYzMjY3AYhIhURqk0ofNjlSpHMPcXBSVDldUjNIhUQnIxZSVDldUv5P/hEbGi9TNSQmGkouLk4yBGcGKiYhJBEaYhsaBBIXJCIRGwGMAAAAAAL8Rf3dAF0ADwA/AEAAABMGBiMiJiY1NDY3NjU0JiMiBgcnNjcmJiMiBhUUHgQXBy4CNTQ2NjMyFhc2MzIWFhUUBwYGFRQWMzI2NwFdKmtDUHY8eXkXPzhKZiCOGB4iRyw8QwoVIT5kNV+Bg0hDeEVHeTdijEB2QD5pbTc1KlU+/e3+BxEZKkkvP1QUHR4iI0tKHjMdERIrIw8aHB8pNxtTS2FhNzNMKiYjSSpMNFBLASYlGxwOFQGWAAAAAvxF/d0BJQAPAFEAUgAAAQYGIyImJjU0NyYmNTQ2NyYmIyIGByc2NyYmIyIGFRQeBBcHLgI1NDY2MzIWFzYzMh4CFRUGFQYVFBYzMjY3FwYGIyInBhUUFjMyNjcBASU7aDVQdjwGKys+OhYtG0pmII4YHiJHLDxDChUhPmQ1X4GDSEN4RUd5N2KMKVdILgGbOzIwTz4nO2g1ExQCPjQ1UDP9JP4HFxMqSzIaDhM7JCU8EQ0JS0oeMx0REisjDxocHyk3G1NLYWE3M0wqJiNJFStDLQYFAwcwFBYPF2IXEwEGBxwgEBYBmAAAAAAD/pH82gE2AAAADwAQABEAAAE2MzIeAxcHLgIjIgcDE/6RLTVDeHBta0B1SIaXXDYgBmX+owsbNVV6YVRxiEMKAfT+/gAAAAADAAD/iQRvBPoAQgBDAEQAABMuAjU0PgIzMzUhNSEVIREhIgYGFRQWFzYzMhcHJiMiBhUUFjMyNyYmNTQ2MzIWFhUUBgcWFwcmJwYGIyImJjU0ARPRIzMmMlVuU+j9ewRv/rv+bkA/JiojbotRKAgZQIWIk6QPHgcFSz4zVC0zNixMiFIpEkARkN51AeaYAh4aOFU0P1c4GoeSkv7nEC4jJDkTOQeRA0lOT08CECQMNj4rRSU1RhNPZESEWgMGRINbVwMj+wYAAAQAAP9dBq0E+gBZAFoAWwBcAAAFAwcFFxYVFAYjIiYmNTQ2NzcnLgIjIgYVFBYzMjY3FwYjIiYmNTQ2NjMyHgIXFyUmJjU0NjYzMzUhNSEVIREjIgYGFRQWFjMyNyYmNTQ2MzIWFhUUBgcTARMnBYt9Bv3wGA04LDN4UCQsMkYdNz8zMkA0Lh07JyRPUUBpO0J4QT5ZSUIfSQEfjZR145sK+z0Grf67Y5izUEiXcAweBAVNPDlTKD0+fP6ry9VrAXEB2jofHSouNVEmFywSFKxGXCk0KyozDA+CHDlmP0RlOR08Zky0dSKvg22ZTqCSkv7QL11KP2A3AxIoDj9EMEkoO08W/qsFNvsG+wAD/WIE+gBbBywAEwAnACgAAAE0PgIzMh4CFRQOAiMiLgIlDgMjIiYnNx4DMzI+AjcB/ogOGSITEyIZDg4ZIhMTIhkOAdMV
RF13SYm9PYMXMT1MMzZLMyIP/tYG0BMiGQ4OGSITEyIZDg4ZIj9ghVMlqLQuRGRCISVFZED9zgAAAAP73AT6AVUHLAAoADwAPQAAASYmJzceAzMyPgI3Fw4DIyImJw4DIyImJzcWFjMyPgI3JTQ+AjMyHgIVFA4CIyIuAgP+mAUKBYMVLThGLzJELx8OjRRAWXBFWIg1Gklaaz2I0EWRL3tgNEs3Jg8BNA4ZIhMTIhkODhkiExMiGQ7yBsUNGw4uP1w+HiJAXTswWn5OI0ZKQVw5GqSwLH12IT9bOwESHxcNDRcfEhEfFw0NFx/+NAAAAP///DME6wC2BywCJgmxAAABBgvfWwAAAP///LoE6wEQBywCJgmyAAABBwvfALUAAAAAAAD///yTBOsBEAcsAiYJswAAAQcL3wC1AAAAAAAA///97gAAA2cHLAImCakAAAEHC+ACEgAAAAAAAP///kUAAALIBywCJgmpAAAAJwmxAhIAAAEHC98CbQAAAAAAAP///swAAAMiBywCJgmpAAAAJwmyAhIAAAEHC98CxwAAAAAAAP///qUAAAMiBywCJgmpAAAAJwmzAhIAAAEHC98CxwAAAAAAAP///kAAAALvBywCJgmrAAABBwvfApQAAAAAAAD///3QAAACxwcsAiYLmgAAAQcL3wJsAAAAAAAA///9YAAAAscHLAImC5sAAAEHC98CbAAAAAAAAP///H8AAALHBywCJgucAAABBwvfAmwAAAAAAAAAAgEr/+MCKQW2AAMAFwAAASMDMwM0PgIzMh4CFRQOAiMiLgIB6Hkz3/AUIi4bGi8iFBQiLxobLiIUAZ4EGPq5JjUhDw8hNSYlNSIQECI1AAAEAOsDxQM2BywAAwAHAAgACQAAAQMjAyEDIwMHAwHOM30zAkszfDM1BQXV/fACEP3wAhDbAjIAAAIARgAABPMFDwAbAB8AAAEDIRUhAyMTIQMjEyM1IRMhNSETMwMhEzMDIRUBIRMhA9c7ARD+1UWQR/7jRo1C/wEZPv7zASVEkEMBH0WNRQEB/PwBHjz+4wMn/smH/pcBaf6XAWmHATeGAWL+ngFi/p6G/skBNwAAAAUAdf/sBioFDgAJAB0AJwA7AD8AAAEUFjMyNTQjIgYFFA4CIyIuAjU0PgIzMh4CARQWMzI1NCMiBgUUDgIjIi4CNTQ+AjMyHgIDASMBAQlGTpmZTkYBwSNKck5JbkwlIklwTUpwTSYBoEZOmZlORgHAIkpxT0lvSiYjSHBNS3BLJv7866ADFwOdeHjw8Hh4VotgMjJgi1ZXil8xMV+K/Wt4d+/wd3lXiWAzM2CJV1eKXjIyXooDRPsGBPoAAwDrA8UBzgcsAAMABAAFAAABAyMDFwMBzjN9M9oFBdX98AIQ2wIyAAQAjP3cAlwHLAATABQAFQAWAAATNBISNjczBgYCAhUQEhcjJiYCAgEDE4wjSW9NpkVsSiSSj6hNb0kjATkFHwKzjwESAQDraG3w/vz+84j+6f3x3WrxAQcBFQLVAjL2sAAAAAAEAHj93AJIBywAEwAUABUAFgAAARQCAgYHIzY2EhI1EAInMxYWEhIBAxMCSCNJb02mRWxKJJKPqE1vSSP+tQXnAqKP/u7/AOtobfABBAENiAEXAg/davH++f7rAcoCMvawAAAAAwCWAlgEWAcsAA4ADwAQAAABAyUXBRMHAwMnEyU3BQMHAwLcKwGNGv6G9bKwnrjy/okdAYcrSgUF9f53b8Ec/rpgAWb+mmABRhzBbwGJ+wIyAAAAAgBmAMcEAgT6AAsADAAAASE1IREzESEVIREjAwHp/n0Bg5YBg/59liQCSZYBhP58lv5+BDMAAAABAD/++AF5AO4ADAAAJRcOAwcjPgM3AWoPDicvMxmKDx0bFgjuFzZ6fHs4PYSDfTUAAAAAAgBkAgwCVAT6AAMABAAAEzUhFQNkAfCPAgyoqALuAAABAJP/4wGRAPoAEwAANzQ+AjMyHgIVFA4CIyIuApMUIi4bGi8iFBQiLxobLiIUbyY1IQ8PITUmJTUiEBAiNQAABABW/dwDGQcsAAMABAAFAAYAAAEBIwEDAxMDGf3nqgIbrAUfBqn4BAf8/lECMvawAAAAAAIAYv/sBAgFzQATACcAAAEUAgYGIyImJgI1NBI2NjMyFhYSBRQeAjMyPgI1NC4CIyIOAgQIM3Gyf3avczkzb7F+d7B0Ov0THkJrTU1sRR8fRWxNTWtCHgLdsf7owmZmwgEYsbEBGMFmZcH+6LKW4JVLSpThl5bglEpKlOAAAAAAAQCyAAACxwW2ABAAACEjETQ+AjcOAwcHJwEzAsewAQMDAREaGx4VlGABf5YDkStiYVkiEhoYGxJ5ewErAAAAAAEAYAAAA/AFywAjAAAhITUBPgM1NC4CIyIGByc+AzMyHgIVFA4CBwEVIQPw/HABXkt2UywiP1Y1X5lFZihcanZBYJtsOzVdgUv+5wKxnAF9UYaAgUw7Wj8gTTx3JD8uGzZlkVtVmpWWUf7VCAAAAAABAFL/7APuBcsAOQAAARQOAgcVFhYVFA4CIyImJzUWFjMyPgI1NC4CIyM1MzI+AjU0LgIjIgYHJz4DMzIeAgPBLlN0R7G4QYTKim3BVVfLXVyGVyk1Yo1ZhYVRflUsJEJcOGujSlwmXW59RmyjbjgEYEl4WDkMBha1kWCgdEAiLaouMihKbENEYT8elyhKZj00UjkeQzZ9HzYpGDZhhQAAAgAXAAAEPwW+AAoAGAAAASMRIxEhNQEzETMhETQ+AjcjDgMHAQQ/1bD9XQKXvNX+ewMEBQEJBxUZGgv+ZQFI/rgBSJ8D1/wwAWQ4e3VmIhQxMS4Q/aAAAAEAg//sA/YFtgAqAAABMh4CFRQOAiMiLgInNR4DMzI+AjU0JiMiDgIHJxMhFSEDNjYCIWOrf0hEhsWAM2NbUiEhWWJjKk98Vi6wqBs/PzkVWjcCsv3sJyBpA4E3bKBpcrZ+QwoTHhSsFyQYDSVOdlGPlwUICQQ5ArCm/l0GDgAAAgBx/+wECgXLACsAPwAAEzQ+BDMyHgIXFSYmIyIOBAczPgMzMh4CFRQOAiMiLgIBMj4CNTQuAiMiDgIVFB4CcRU1XI7GhRMuLysRI1grWolkQyoUAwwUOUxfO1+abDs+dKRmZK+ASgHbPGNIJyFCY0JDb04rJUluAnFp0L+keUUCBQcFmwwMK05sg5RQJD8tGjtypWpytn9ETqDy/rkpU39XRm9OKi9LYDBDhWpDAAABAFoAAAQGBbYABgAAIQEhNSEVAQEZAjP9DgOs/dUFEKaR+tsAAAAABABq/+wEAAXNACcAOgBKAEsAAAEyHgIVFA4CBx4DFRQOAiMiLgI1ND4CNy4DNTQ+AgMUHgIzMj4CNTQuAicnBgYBIgYVFB4CFz4DNTQmAQI1VJVxQihGYDg6b1c1Q3mpZm6rdT0tTGg6MVY/JUNylccgRGhIRmtIJCdJZj8efoABFmp9Iz5XMzBVPyR+ASYFzSxYhFhDbFdFHB9MX3ZJXJVoODZlklxLeGBKHB9JWm1
[Remainder of base64-encoded embedded binary font data elided (glyph outlines, kerning, and layout tables; not human-readable). The font's name table, stored as UTF-16 strings within the data, identifies it as: "Copyright 2012 Google Inc. All Rights Reserved."; family "Noto Sans"; style "Regular"; full name "Monotype Imaging - Noto Sans"; "Version 1.04"; PostScript name "NotoSans"; trademark notice "Noto is a trademark of Google Inc. and may be registered in certain jurisdictions."; manufacturer "Monotype Imaging Inc."; designer "Monotype Design team" ("Designed by Monotype design team"); vendor URLs http://code.google.com/p/noto/ and http://www.monotypeimaging.com/ProductsServices/TypeDesignerShowcase; license "Licensed under the Apache License, Version 2.0" (http://www.apache.org/licenses/LICENSE-2.0).]
gAAAaYAAv6kAAAATgAC/qQAAABhAAL+pAAAAC0AAv4iAAAALQAC/qQAAAAdAAL+pAAAADAAAv4iAAAAMAAC/qQAAAAoAAL+IgAAACgAAv6kAAAAOQAC/iIAAAA5AAL+IgAAAE4AAv6kAAAAXgAC/iIAAABeAAL+pAAAAHEAAv4iAAAAYQAC/iIAAABxAAL+VAAAACQAAv5UAAAAJgAC/lQAAAAdAAL+VAAAADAAAv5UAAAARwAC/lQAAABZAAL+VAAAABcAAv6kAAAAHwAC/qQAAAAgAAL+pAAAABgAAv6kAAAAKQAC/iIAAABAAAL+IgAAAFIAAv6kAAAAEAAwAMoAYgBqAHIAegCCAIoAkgCyAMIA2gCaAJoAogCiAMoA2gCqALIAygC6ANIAwgDaAMoA0gDaAOIA6gDyAPoBAgEKARIBGgEiASoBMgE6AUIBSgFSAVoBYgFqAXIBegGCAAIDJgAAABUAAgMOAAAARQACAywAAABNAAICUgAAACMAAgKUAAAAKAACAw4AAAA1AAICcAAAADUAAgG6AAAAJAACBBwAAABDAAIDDgAAADcAAgMsAAAANgACAyYAAAAZAAIDFAAAAAYAAgL3AAAAGQACAywAAAAzAAIDCQAAAAAAAgPbAAAAGQACBNAAAABaAAIDQAAAAD8AAgQhAAAATQACBWsAAABeAAIEDQAAAEgAAgVsAAAAaAACA1QAAABGAAICqgAAAC0AAgKWAAAANwACA8MAAAA7AAIG2QAAAF8AAgMGAAAARAACAwkAAABLAAICTgAAACUAAgKjAAAAJgACAwYAAAA0AAICkAAAADIAAgQsAAAARAACAx0AAABEAAIFjgAAAFsAAgAIAAEACAABACgABAAAAA8ASgCAAJYAyADOAPABMgFUAX4BwAHGAdQB8gIIAg4AAQAPChEKEgoYChoKIAokCiYKJwosCi0KLgowCjIKMwtCAA0Jgv+cCYP/nAmJ/5wJjP+cCZD/iAmR/34Jk/+cCZf/iAmY/4gJnP+ICZ//nAmi/5wJpf+cAAUJg/9gCZH/Vgmb/5IJnP9+CaL/iAAMCYL/fgmJ/+IJiv9qCYz/fgmO/3QJkf9+CZP/fgmV/8QJmf9+CZv/fgmc/34Jov9WAAEJif/EAAgJgv+ICYP/iAmX/4gJmP+ICZz/kgmf/3QJov+ICaX/iAAQCYL/pgmI/6YJjP+mCZH/nAmT/5wJl/9+CZj/fgmc/34Jov+mCaX/pgmm/4gKaP+cCvn/nAr6/5wLR/7UC0j/zgAICYn/nAmM/5wJkf9+CZf/iAmY/4gJnP+ICZ//nAml/5wACgmJ/5wJiv+cCZH/iAmT/5wJmf+cCZv/nAmc/3QJn/+SCaL/nAml/8QAEAmC/5wJif/OCYz/pgmN/7AJjv+wCY//nAmR/5IJk/+cCZf/iAmY/4gJmf+cCZv/ugmc/7AJn/+SCaL/nAmm/4gAAQmc/0wAAwmf/5IJov+cCab/2AAHCYL/ugmQ/5IJl/+SCZj/kgmb/+wJnP/OCaT/kgAFCZD/xAmV/7oJm//YCZz/ugmf/84AAQmi/84AAQmi/4gACAAAAAEACAACAUwAEAEQAmYAAgAAARgAAgAqCW4JbwACCaYJpgABCawJrwABCbAJswACCbgJuAABCcgJyQABCg8KDwACChAKEAABClsKWwABCl0KXQABCmEKZAABCnMKcwABCngKeAABCn8KfwABCoEKgQABCoUKiAABCpcKlwABCu0K9gABCvkK+wABCv4LAAABCwQLDwACCxwLHAACCzQLNAABCzYLNgABCzkLOQABCzsLPAABCz8LPwABC0MLSgABC1ALUAABC1ILUgABC1cLVwABC1kLXwABC2ILYwABC6YLrAABC68LrwABC7MLswABC8cLxwABC8oLygABC80LzQABC9AL0wABC9YL2AABC9wL3AABAAEJuwABAAEAAgAGABQAAQABAAEAAAABAAAAEAACAAIAAQABAAAAAQAAABAAAQAAAAEACAABAAgAAv4gAAEAAQm7AAgAAAABAAgAAgF4ABAA/gEMAAIAAAEUAAIAJwluCW8AAQlxCXEAAQl1CXUAAQl6CXsAAQl9CYEAAQmCCaYAAwmqCaoABAmrCasAAgmwCbcAAQm+CcUAAwnYCdgAAQniCeIAAQnmCeYAAQnrCewAAQnuCfIAAQn1CfUAAQn2Cg4AAwoPCg8AAQpXCpwAAwsBCzAAAQsxCzEAAws0CzQAAws2CzYAAws4CzkAAws7CzwAAws/Cz8AAwtBC0EAAwtDC0gAAwtKC0wAAwtOC1AAAwtSC1IAAwtVC2MAAwtlC2UAAwtnC2cAAwtpC3QABAuZC5kABQuaC5wAAgudC6UAAQvfC+sAAQABCboABAABAAAAAQABAAEJawABAAEABAAKABgAJgA2AAEAAQABAAAAAQAAABIAAQACAAEAAAABAAAAEwACAAMABAABAAAAAQAAABMAAQAFAAEAAAABAAAAEgABAAAAAQAIAAEAGAACAfQAAQAAAAEACAABAAgAAgDIAAEAAwm6CbwJvQABAAAACgDaAdYABWN5cmwAIGRldjIALGRldmEAdGdyZWsAuGxhdG4AxAAEAAAAAP//AAEAAAAKAAFNQVIgACgAAP//AAwADQAEABEAEAAGAAoACAAOAAMABwAPAAsAAP//AA0ADAANAAQAEQAQAAYACgAIAA4AAwAHAA8ACwAKAAFNQVIgACYAAP//AAsADQAEABEABQAJABIADgADAAcADwALAAD//wAMAAwADQAEABEABQAJABIADgADAAcADwALAAQAAAAA//8AAQABAAQAAAAA//8AAQACABNjY21wAHRjY21wAHRjY21wAHRhYnZzAH5ha2huAJBibHdmAJZibHdmAJxibHdzAKJjamN0ALBoYWxmALZoYWxmALxoYWxuAMRsb2NsAMpudWt0ANBwcmVzANZwc3RzAN5ya3JmAORycGhmAOx2YXR1APIAAAADAAAAAQACAAAABwAWABgAGQAaABwATABRAAAAAQAGAAAAAQAJAAAAAQARAAAABQBTAFgAWQBeAF8AAAABABMAAAABAAwAAAACAA0ADgAAAAEAZwAAAAEABAAAAAEABQAAAAIAFAAVAAAAAQBjAAAAAgAIAAoAAAABAAcAAAADAA8AEAARAGgA0gZOBswIcAiOCKgLigu8C9YP2A/4ECoQShEyFL4XoBs6HqQe7B8IIDYiEiJaIr4jhCOsJiAmVCZoLzIyGDTgOBA6mjz0PyRBZkOQRa5IZEpUTDpOJlAGUThS6FSeVjBXklliWmJbLFxAXPZdrF5iXxJfyGCQYUxh5GJYYwpjiGP8Z+hn9mgEaBJoIGguaDxoSmhYaGZodGiIaWRpgmmQaZ5ptms4a0xsuGzSbahtym9ycWJzYnN2dbR1/HYWd9p4DnggeDp4VHnIeeB5+HoeAAQAAAABAAgAAQVqAAUAEAEiAjQDRgRYABwAOgBCAEoAUgBaAGIAagByAHoAggCIAJAAmACgAKgAsAC2
AL4AxgDOANYA3gDkAOwA9AD8AQQBDAiPAAMEygTLCJAAAwTKBMwIkQADBMoEzQiSAAMEygTOCJMAAwTLBMoIlAADBMsEywiVAAMEywTMCJYAAwTLBM0IlwADBMsEzgiYAAIEywiZAAMEzATKCJoAAwTMBMsImwADBMwEzAicAAMEzATNCJ0AAwTMBM4IngACBMwInwADBM0EygigAAMEzQTLCKEAAwTNBMwIogADBM0EzQijAAMEzQTOCKQAAgTNCKUAAwTOBMoIpgADBM4EywinAAMEzgTMCKgAAwTOBM0IqQADBM4EzgiqAAIEzgAcADoAQgBKAFIAWgBiAGgAcAB4AIAAiACQAJgAoACoALAAtgC+AMYAzgDWAN4A5ADsAPQA/AEEAQwIqwADBMoEygisAAMEygTLCK0AAwTKBMwIrgADBMoEzQivAAMEygTOCLAAAgTKCLEAAwTLBMoIsgADBMsEzAizAAMEywTNCLQAAwTLBM4ItQADBMwEygi2AAMEzATLCLcAAwTMBMwIuAADBMwEzQi5AAMEzATOCLoAAgTMCLsAAwTNBMoIvAADBM0Eywi9AAMEzQTMCL4AAwTNBM0IvwADBM0EzgjAAAIEzQjBAAMEzgTKCMIAAwTOBMsIwwADBM4EzAjEAAMEzgTNCMUAAwTOBM4IxgACBM4AHAA6AEIASgBSAFoAYgBoAHAAeACAAIgAkACWAJ4ApgCuALYAvgDGAM4A1gDeAOQA7AD0APwBBAEMCMcAAwTKBMoIyAADBMoEywjJAAMEygTMCMoAAwTKBM0IywADBMoEzgjMAAIEygjNAAMEywTKCM4AAwTLBMsIzwADBMsEzAjQAAMEywTNCNEAAwTLBM4I0gACBMsI0wADBMwEygjUAAMEzATLCNUAAwTMBM0I1gADBMwEzgjXAAMEzQTKCNgAAwTNBMsI2QADBM0EzAjaAAMEzQTNCNsAAwTNBM4I3AACBM0I3QADBM4EygjeAAMEzgTLCN8AAwTOBMwI4AADBM4EzQjhAAMEzgTOCOIAAgTOABwAOgBCAEoAUgBaAGIAaABwAHgAgACIAJAAlgCeAKYArgC2AL4AxADMANQA3ADkAOoA8gD6AQIBCgjjAAMEygTKCOQAAwTKBMsI5QADBMoEzAjmAAMEygTNCOcAAwTKBM4I6AACBMoI6QADBMsEygjqAAMEywTLCOsAAwTLBMwI7AADBMsEzQjtAAMEywTOCO4AAgTLCO8AAwTMBMoI8AADBMwEywjxAAMEzATMCPIAAwTMBM0I8wADBMwEzgj0AAIEzAj1AAMEzQTKCPYAAwTNBMsI9wADBM0EzAj4AAMEzQTOCP4AAgTOCPkAAwTOBMoI+gADBM4Eywj7AAMEzgTMCPwAAwTOBM0I/QADBM4EzgAcADoAQgBKAFIAWgBiAGgAcAB4AIAAiACQAJYAngCmAK4AtgC+AMQAzADUANwA5ADsAPIA+gECAQoI/wADBMoEygkAAAMEygTLCQEAAwTKBMwJAgADBMoEzQkDAAMEygTOCQQAAgTKCQUAAwTLBMoJBgADBMsEywkHAAMEywTMCQgAAwTLBM0JCQADBMsEzgkKAAIEywkLAAMEzATKCQwAAwTMBMsJDQADBMwEzAkOAAMEzATNCQ8AAwTMBM4JEAACBMwJEQADBM0EygkSAAMEzQTLCRMAAwTNBMwJFAADBM0EzQkVAAMEzQTOCRYAAgTNCRcAAwTOBMoJGAADBM4EywkZAAMEzgTMCRoAAwTOBM0AAgABBMoEzgAAAAYAAAABAAgAAwAAAAECLgABABIAAQAAAAMAAgAQAmACYgAAAowCjwADA3MDcwAHBOQE9AAIBPoE+gAZBRwFHgAaBSEFIwAdBSUFJQAgBSkFKwAhBS8FMQAkBTYFNgAnBToFOgAoBUIFTgApBkgGSQA2BksGUQA4BlMGUwA/AAQAAAABAAgAAQGSAAMADABuAQAACAASABwAJgAwADoARABOAFgJGwAEBOUE8wJgCRwABATlBPMCYQkdAAQE5QT0AmAJHgAEBOUE9AJhCR8ABATnBPMCYAkgAAQE5wTzAmEJIQAEBOcE9AJgCSIABATnBPQCYQAMABoAJAAuADgAQgBMAFYAYABqAHQAfgCICSMABATlBPMCYAkkAAQE5QTzAmEJJQAEBOUE9AJgCSYABATlBPQCYQknAAQE5wTzAmAJKAAEBOcE8wJhCSkABATnBPQCYAkqAAQE5wT0AmEJMwAEBOkE5QJgCTQABATpBOUCYQk1AAQE6QTnAmAJNgAEBOkE5wJhAAwAGgAkAC4AOABCAEwAVgBgAGoAdAB+AIgJKwAEBOUE8wJgCSwABATlBPMCYQktAAQE5QT0AmAJLgAEBOUE9AJhCS8ABATnBPMCYAkwAAQE5wTzAmEJMQAEBOcE9AJgCTIABATnBPQCYQk3AAQE6QTlAmAJOAAEBOkE5QJhCTkABATpBOcCYAk6AAQE6QTnAmEAAQADAX4BhgGSAAEAAAABAAgAAgAMAAMA8wI3BgMAAQADAEwATQRXAAEAAAABAAgAAgAKAAILYAtlAAEAAgmfCaMABAAAAAEACAABAqYAOAB2AIAAigCUAJ4AqACyALwAxgDQANoA5ADuAPgBAgEMARYBIAEqATQBPgFIAVIBXAFmAXABegGEAY4BmAGiAawBtgHAAcoB1AHeAegB8gH8AgYCEAIaAiQCLgI4AkICTAJWAmACagJ0An4CiAKSApwAAQAECeIAAgmnAAEABAnjAAIJpwABAAQJ5AACCacAAQAECeUAAgmnAAEABAnmAAIJpwABAAQJ5wACCacAAQAECegAAgmnAAEABAnpAAIJpwABAAQJ6gACCacAAQAECesAAgmnAAEABAnsAAIJpwABAAQJ7QACCacAAQAECe4AAgmnAAEABAnvAAIJpwABAAQJ8AACCacAAQAECfEAAgmnAAEABAnyAAIJpwABAAQJvgACCacAAQAECb8AAgmnAAEABAnAAAIJpwABAAQJ9gACCacAAQAECfcAAgmnAAEABAn4AAIJpwABAAQJ+QACCacAAQAECcEAAgmnAAEABAn6AAIJpwABAAQJ+wACCacAAQAECfwAAgmnAAEABAn9AAIJpwABAAQJwgACCacAAQAECcMAAgmnAAEABAn+AAIJpwABAAQJ/wACCacAAQAECgAAAgmnAAEABAoBAAIJpwABAAQKAgACCacAAQAECZYAAgmnAAEABAoDAAIJpwABAAQJxAACCacAAQAECgQAAgmnAAEABAoFAAIJpwABAAQKBgACCacAAQAECcUAAgmnAAEABAmeAAIJpwABAAQKBwACCacAAQAECaEAAgmnAAEABAoIAAIJpwABAAQKCQACCacAAQAECgoAAgmnAAEABAoLAAIJpwABAAQKDAACCacAAQAECfMAAgmnAAEABAn0AAIJpwABAAQJ9QACCacAAQAEC2EAAgmnAAEABAtnAAIJpwACAAgJcQmVAAAJlwmdACUJnwmgACwJogmmAC4JxgnHADM
J2AnYADULYAtgADYLZQtlADcABAAAAAEACAABACIAAgAKABYAAQAECg0AAwm4CaQAAQAECg4AAwm4CYsAAQACCYIJiQAEAAAAAQAIAAEELgABAAgAAQAECg8AAgm4AAQAAAABAAgAAQ80AEoAmgCmALIAvgDKANYA4gDuAPoBBgESAR4BKgE2AUIBTgFaAWYBcgF+AYoBlgGiAa4BugHGAdIB3gHqAfYCAgIOAhoD4gImAjICPgJKAlYCYgJuAnoChgKSAp4CqgK2AsICzgLaAuYC8gL+AwoDFgMiAy4DOgNGA1IDXgNqA3YDggPuA44DmgOmA7IDvgPKA9YD4gPuAAEABApXAAMJuAmdAAEABApYAAMJuAmdAAEABApZAAMJuAmdAAEABApaAAMJuAmdAAEABApbAAMJuAmdAAEABApcAAMJuAmdAAEABApdAAMJuAmdAAEABApeAAMJuAmdAAEABApfAAMJuAmdAAEABApgAAMJuAmdAAEABAphAAMJuAmdAAEABApiAAMJuAmdAAEABApjAAMJuAmdAAEABApkAAMJuAmdAAEABAplAAMJuAmdAAEABApmAAMJuAmdAAEABApnAAMJuAmdAAEABApoAAMJuAmdAAEABAppAAMJuAmdAAEABApqAAMJuAmdAAEABAqOAAMJuAmdAAEABAprAAMJuAmdAAEABApsAAMJuAmdAAEABAptAAMJuAmdAAEABApuAAMJuAmdAAEABApvAAMJuAmdAAEABApwAAMJuAmdAAEABApxAAMJuAmdAAEABAqVAAMJuAmdAAEABApyAAMJuAmdAAEABApzAAMJuAmdAAEABAqXAAMJuAmdAAEABAp0AAMJuAmdAAEABAp2AAMJuAmdAAEABAp3AAMJuAmdAAEABAp4AAMJuAmdAAEABAp7AAMJuAmdAAEABAp8AAMJuAmdAAEABAp9AAMJuAmdAAEABAqCAAMJuAmdAAEABAqHAAMJuAmdAAEABAqIAAMJuAmdAAEABAqQAAMJuAmdAAEABAqUAAMJuAmdAAEABAp+AAMJuAmdAAEABAp/AAMJuAmdAAEABAqAAAMJuAmdAAEABAqBAAMJuAmdAAEABAqDAAMJuAmdAAEABAqEAAMJuAmdAAEABAqFAAMJuAmdAAEABAqGAAMJuAmdAAEABAqJAAMJuAmdAAEABAqKAAMJuAmdAAEABAqLAAMJuAmdAAEABAqMAAMJuAmdAAEABAqNAAMJuAmdAAEABAqPAAMJuAmdAAEABAqRAAMJuAmdAAEABAqSAAMJuAmdAAEABAqTAAMJuAmdAAEABAqWAAMJuAmdAAEABAqYAAMJuAmdAAEABAqaAAMJuAmdAAEABAqbAAMJuAmdAAEABAqcAAMJuAmdAAEABAp5AAMJuAmdAAEABAp6AAMJuAmdAAEABAtiAAMJuAmdAAEABAtjAAMJuAmdAAEABAp1AAMJuAmdAAEABAqZAAMJuAmdAAQAAAABAAgAAQASAAEACAABAAQKEAACCbgAAQABCZ0ABQAAAAEACAABAA4ABAAaABoAGgAaAAEABAnZCdoJ3AndAAEABAADAAEJuAmdAAEACwAEAAAAAQAIAAEAEgABAAgAAQAEChAAAgmdAAEAAQm4AAQAAAABAAgAAQrAAEoBggGMAZYBoACaAbYBwAHKAdQB3gCkAK4AuADCAhgCIgIsAMwCQgJMAlYCYAJqAnQCfgKIApICnAKoBEQCsgK8AsYC0ALaAuQC7gL4AwIDDAMWajpqRAM4A0IDTGpOA2IDbAN2A4BqWGpiA6IDrAO2ANYDzAPWA+AD6gP0BE4D/gQIBBIEHAQmBDAEOgREBE4EWARiAAEABAoVAAIJuAABAAQKGwACCbgAAQAEChwAAgm4AAEABAodAAIJuAABAAQKHgACCbgAAQAECiIAAgm4AAEABApGAAIJuAAEAAAAAQAIAAEJ2ABKAJoApACuALgAwgDOANgA4gDsAPYBAAEMARgBJAEwAToBRAFOAVoBZAFuAXgBggGMAZYBoAGqAbQBwANcAcoB1AHeAegB8gH8AgYCEAIaAiQCLgI4AkQCUAJaAmQCbgJ6AoQCjgKYAqICrgK6AsQCzgLYAuQC7gL4AwIDDANmAxYDIAMqAzQDPgNIA1IDXANmA3ADegABAAQKEQACCbgAAQAEChIAAgm4AAEABAoTAAIJuAABAAQKFAACCbgAAQAEChUAAwm4CeAAAQAEChYAAgm4AAEABAoXAAIJuAABAAQKGAACCbgAAQAEChkAAgm4AAEABAoaAAIJuAABAAQKGwADCbgJ4AABAAQKHAADCbgJ4AABAAQKHQADCbgJ4AABAAQKHgADCbgJ4AABAAQKHwACCbgAAQAECiAAAgm4AAEABAohAAIJuAABAAQKIgADCbgJ4AABAAQKIwACCbgAAQAECiQAAgm4AAEABApIAAIJuAABAAQKJQACCbgAAQAECiYAAgm4AAEABAonAAIJuAABAAQKKAACCbgAAQAECikAAgm4AAEABAoqAAIJuAABAAQKKwADCbgJ4AABAAQKKwACCbgAAQAECi0AAgm4AAEABApRAAIJuAABAAQKLgACCbgAAQAECi8AAgm4AAEABAowAAIJuAABAAQKMQACCbgAAQAECjIAAgm4AAEABAo1AAIJuAABAAQKNgACCbgAAQAECjcAAgm4AAEABAo8AAIJuAABAAQKQQADCbgJ4AABAAQKQgADCbgJ4AABAAQKSgACCbgAAQAECk4AAgm4AAEABAo4AAIJuAABAAQKOQADCbgJ4AABAAQKOgACCbgAAQAECjsAAgm4AAEABAo9AAIJuAABAAQKPgACCbgAAQAECj8AAwm4CeAAAQAECkAAAwm4CeAAAQAECkMAAgm4AAEABApEAAIJuAABAAQKRQACCbgAAQAECkYAAwm4CeAAAQAECkcAAgm4AAEABApJAAIJuAABAAQKSwACCbgAAQAECkwAAgm4AAEABApNAAIJuAABAAQKUgACCbgAAQAEClMAAgm4AAEABApUAAIJuAABAAQKVQACCbgAAQAEClYAAgm4AAEABAozAAIJuAABAAQKNAACCbgAAQAECiwAAgm4AAEABApQAAIJuAABAAQLZgACCbgAAQAEC2gAAgm4AAQAAAABAAgAAQKgADoAegCEAI4AmACiAKwAtgDAAMoA1ADeAOgA8gD8AQYBEAEaASQBLgE4AlACjAFCAUwBVgFgAWoBdAF+AYgBkgGcAaYBsAG6AcQBzgHYAeIB7AH2AgACCgIUAh4CKAIyAjwCRgJQApYCWgJkAm4CeAKCAowClgABAAQKnQACCbgAAQAECp4AAgm4AAEABAqfAAIJuAABAAQKoAACCbgAAQAECqMAAgm4AAEABAqkAAIJuAABAAQKpQACCbgAAQAECqYAAgm4AAEABAqnAAIJuAABAAQKsAACCbgAAQAECrEAAgm4AAEABAqyAAIJuAABAAQKtAACCbgAAQAECrUAAgm4AAEABAq2AAIJuAABAAQKtwACCbgAAQ
AECrgAAgm4AAEABAq5AAIJuAABAAQKugACCbgAAQAECrsAAgm4AAEABAq+AAIJuAABAAQKvwACCbgAAQAECsAAAgm4AAEABArBAAIJuAABAAQKwgACCbgAAQAECsQAAgm4AAEABArFAAIJuAABAAQKxgACCbgAAQAECscAAgm4AAEABArIAAIJuAABAAQKyQACCbgAAQAECswAAgm4AAEABArNAAIJuAABAAQKzgACCbgAAQAECs8AAgm4AAEABArQAAIJuAABAAQK2QACCbgAAQAECtoAAgm4AAEABArbAAIJuAABAAQK3QACCbgAAQAECt4AAgm4AAEABArfAAIJuAABAAQK4AACCbgAAQAECuEAAgm4AAEABAriAAIJuAABAAQK4wACCbgAAQAECuQAAgm4AAEABAq8AAIJuAABAAQK5wACCbgAAQAECugAAgm4AAEABArpAAIJuAABAAQK6gACCbgAAQAECusAAgm4AAEABAq9AAIJuAABAAQK5gACCbgAAgAJClcKWgAAClwKYAAECmUKZwAJCmkKdwAMCnkKfgAbCoAKhAAhCokKiwAmCo0KmwApC2ILYwA4AAQAAAABAAgAAQNqAEoAmgCkAK4AuADCAMwA1gDgAOoA9AD+AQgBEgEcASYBMAE6AUQBTgFYAWIBbAF2AYABigGUAZ4BqAGyAbwBxgHQAdoDVgHkAe4B+AICAgwCFgIgAioCNAI+AkgCUgJcAmYCcAJ6AoQCjgKYAqICrAK2AsACygLUAt4C6ALyAvwDBgNgAxADGgMkAy4DOANCA0wDVgNgAAEABApXAAIKEAABAAQKWAACChAAAQAEClkAAgoQAAEABApaAAIKEAABAAQKWwACChAAAQAEClwAAgoQAAEABApdAAIKEAABAAQKXgACChAAAQAECl8AAgoQAAEABApgAAIKEAABAAQKYQACChAAAQAECmIAAgoQAAEABApjAAIKEAABAAQKZAACChAAAQAECmUAAgoQAAEABApmAAIKEAABAAQKZwACChAAAQAECmgAAgoQAAEABAppAAIKEAABAAQKagACChAAAQAECo4AAgoQAAEABAprAAIKEAABAAQKbAACChAAAQAECm0AAgoQAAEABApuAAIKEAABAAQKbwACChAAAQAECnAAAgoQAAEABApxAAIKEAABAAQKlQACChAAAQAECnIAAgoQAAEABApzAAIKEAABAAQKlwACChAAAQAECnQAAgoQAAEABAp2AAIKEAABAAQKdwACChAAAQAECngAAgoQAAEABAp7AAIKEAABAAQKfAACChAAAQAECn0AAgoQAAEABAqCAAIKEAABAAQKhwACChAAAQAECogAAgoQAAEABAqQAAIKEAABAAQKlAACChAAAQAECn4AAgoQAAEABAp/AAIKEAABAAQKgAACChAAAQAECoEAAgoQAAEABAqDAAIKEAABAAQKhAACChAAAQAECoUAAgoQAAEABAqGAAIKEAABAAQKiQACChAAAQAECooAAgoQAAEABAqLAAIKEAABAAQKjAACChAAAQAECo0AAgoQAAEABAqPAAIKEAABAAQKkQACChAAAQAECpIAAgoQAAEABAqTAAIKEAABAAQKlgACChAAAQAECpgAAgoQAAEABAqaAAIKEAABAAQKmwACChAAAQAECpwAAgoQAAEABAp5AAIKEAABAAQKegACChAAAQAEC2IAAgoQAAEABAtjAAIKEAABAAQKdQACChAAAQAECpkAAgoQAAIABgmCCaYAAAm+CcUAJQn2Cg4ALQtgC2EARgtlC2UASAtnC2cASQAEAAAAAQAIAAEDRgBHAJQAngCoALIAvADGANAA2gDkAO4A+AECAQwBFgEgASoBNAE+AUgBUgFcAWYBcAF6AYQBjgGYAaIBrAG2AzIBwAHKAdQB3gHoAfIB/AIGAhACGgIkAi4COAJCAkwCVgJgAmoCdAJ+AogCkgKcAqYCsAK6AsQCzgLYAuIC7AL2AwADCgM8AxQDHgMoAzIDPAABAAQKnQACChAAAQAECp4AAgoQAAEABAqfAAIKEAABAAQKoAACChAAAQAECqEAAgoQAAEABAqjAAIKEAABAAQKpAACChAAAQAECqUAAgoQAAEABAqmAAIKEAABAAQKpwACChAAAQAECqgAAgoQAAEABAqqAAIKEAABAAQKrAACChAAAQAECq4AAgoQAAEABAqwAAIKEAABAAQKsQACChAAAQAECrIAAgoQAAEABAqzAAIKEAABAAQKtAACChAAAQAECrUAAgoQAAEABAq2AAIKEAABAAQKtwACChAAAQAECrgAAgoQAAEABAq5AAIKEAABAAQKugACChAAAQAECrsAAgoQAAEABAq8AAIKEAABAAQKvQACChAAAQAECr4AAgoQAAEABAq/AAIKEAABAAQKwQACChAAAQAECsIAAgoQAAEABArDAAIKEAABAAQKxAACChAAAQAECsUAAgoQAAEABArGAAIKEAABAAQKxwACChAAAQAECsgAAgoQAAEABArJAAIKEAABAAQKygACChAAAQAECswAAgoQAAEABArNAAIKEAABAAQKzgACChAAAQAECs8AAgoQAAEABArQAAIKEAABAAQK0QACChAAAQAECtMAAgoQAAEABArVAAIKEAABAAQK1wACChAAAQAECtkAAgoQAAEABAraAAIKEAABAAQK2wACChAAAQAECtwAAgoQAAEABArdAAIKEAABAAQK3gACChAAAQAECt8AAgoQAAEABArgAAIKEAABAAQK4QACChAAAQAECuIAAgoQAAEABArjAAIKEAABAAQK5AACChAAAQAECuYAAgoQAAEABArnAAIKEAABAAQK6AACChAAAQAECuoAAgoQAAEABArrAAIKEAABAAQK7AACChAAAQAECsAAAgoQAAEABArpAAIKEAACAAQKEQpOAAAKUApWAD4LZgtmAEULaAtoAEYABgAAAAEACAACAFBa4gAQABgAAgAAAC4AAQpxAAEAAQACAAMJrgmvAAEJuAm4AAEJyAnJAAEAAQAEAAAAAQABAAEAAQAAABIAAgAAAAEACAABAAgAAQAOAAEAAQpxAAIKKwmdAAQAAAABAAgAAQEWAAYAEgAeAFYAbACYAK4AAQAECzIAAwm4CZwABQAMABYAHgAoADALNQAECbgJjAmtCzQAAwm4CYwLNwAECbgJjQmtCzYAAwm4CY0LOAADCbgJnAACAAYADgs5AAMJuAmNCzoAAwm4CZwABAAKABQAHAAkCz0ABAm4CY4JrQs8AAMJuAmOCzsAAwm4CY8LPgADCbgJnAACAAYADgs/AAMJuAmPC0AAAwm4CZwACgAWAB4AJgAuADYAPgBGAE4AVgBeC0QAAwm4CYQLQwADCbgJhQtKAAMJuAmTC0gAAwm4CZQLRQADCbgJmQtGAAMJuAmaC0sAAwm4CZsLTAADCbgJnAtHAAMJuAmiC0kABAm4CiMJnAABAAYJhgmMCY0JjgmPCZMABAAAAAEACAABAbIADwAkAC4AOABCA
HAAggCmALgAwgEWASABKgE0AVYBiAABAAQLMQACCZEAAQAECzIAAgmcAAEABAszAAIJnAAFAAwAFAAaACIAKAs1AAMJjAmtCzQAAgmMCzcAAwmNCa0LNgACCY0LOAACCZwAAgAGAAwLOQACCY0LOgACCZwABAAKABIAGAAeCz0AAwmOCa0LPAACCY4LOwACCY8LPgACCZwAAgAGAAwLPwACCY8LQAACCZwAAQAEC0EAAgmRAAoAFgAcACIAKAAuADQAOgBAAEYATAtEAAIJhAtDAAIJhQtKAAIJkwtIAAIJlAtFAAIJmQtGAAIJmgtLAAIJmwtMAAIJnAtHAAIJogtJAAMKIwmcAAEABAtOAAIJlQABAAQLTwACCZwABABqAHAAdgB8AAQACgAQABYAHAtWAAIJjAtYAAIJjQtXAAIKYQtZAAIKYgAGAA4AFAAaACAAJgAsC1oAAgmQC1sAAgmVC1wAAgmbC10AAgmcC14AAgmfC18AAgmiAAUADAASABgAHgAkC1AAAgmHC1UAAgmVC1QAAgmfC1IAAgmiC2QAAgtgAAEADwoRChUKFwobChwKHQoeCiAKIgolCi4KLwowCjILZgAEAAAAAQAIAAEANAAEAA4AGAAiACIAAQAEC0IAAgogAAEABAtNAAIKKgACAAYADAtRAAIKFgtTAAIKLgABAAQKIAoiCi8LZgAFAAAAAQAIAAIAjgAOAAMAAABOAAAAAgAKCW4JbgACCXEJcQABCXoJewABCX0JgQABCdgJ2AABCeIJ4gABCesJ7AABCe4J8gABCfUJ9QABCg8KDwACAAEABAACAAEAAgAAABcAAgAAAAEACAABACoAEgBSAFgAXgBkAGoAcAB2AHwAggCIAI4AlACaAKAApgCsALIAuAABABIJcQl6CXsJfQl+CX8JgAmBCdgJ4gnrCewJ7gnvCfAJ8QnyCfUAAglyCbEAAgl8CbAAAgl8CbEAAgl8CbIAAglzCbAAAglzCbEAAglzCbIAAglzCbMAAglyCbAAAgnjCbEAAgntCbAAAgntCbEAAgntCbIAAgnkCbAAAgnkCbEAAgnkCbIAAgnkCbMAAgnjCbAABAAAAAEACAABABoAAQAIAAIABgAMCxwAAgluCxwAAglvAAEAAQoPAAQAAAABAAgAAQIyABsAPABGAFgAYgBsAHYAgACKAJQAngDAAOIBBAEmAUgBagGMAa4B0AHaAewB9gIAAgoCFAIeAigAAQAECx0AAglvAAIABgAMCx4AAgluCx4AAglvAAEABAsfAAIJbwABAAQLIAACCW8AAQAECyEAAglvAAEABAsiAAIJbwABAAQLIwACCW8AAQAECyQAAglvAAEABAslAAIJbwAEAAoAEAAWABwL6AACCW4LAQACCW8LAgACCg8LAwACCxwABAAKABAAFgAcC+AAAgluCwQAAglvCwUAAgoPCwYAAgscAAQACgAQABYAHAvhAAIJbgsHAAIJbwsIAAIKDwsJAAILHAAEAAoAEAAWABwL4gACCW4LCgACCW8LCwACCg8LDAACCxwABAAKABAAFgAcC+MAAgluCw0AAglvCw4AAgoPCw8AAgscAAQACgAQABYAHAvkAAIJbgsQAAIJbwsRAAIKDwsSAAILHAAEAAoAEAAWABwL5QACCW4LEwACCW8LFAACCg8LFQACCxwABAAKABAAFgAcC+YAAgluCxYAAglvCxcAAgoPCxgAAgscAAQACgAQABYAHAvnAAIJbgsZAAIJbwsaAAIKDwsbAAILHAABAAQLJwACCW8AAgAGAAwLKAACCW4LKAACCW8AAQAECykAAglvAAEABAsqAAIJbwABAAQLKwACCW8AAQAECywAAglvAAEABAstAAIJbwABAAQLLgACCW8AAQAECy8AAglvAAEAGwlxCXUJegl7CX0Jfgl/CYAJgQmrCbAJsQmyCbMJtAm1CbYJtwniCeYJ6wnsCe4J7wnwCfEJ8gAFAAAAAQAIAAIAOgAOAAMAAAAeAAAAAgACCaoJqgABCeEJ4QACAAEABAACAAEAAAAAABsAAQAAAAEACAABAAYBvwABAAEJqgAFAAAAAQAIAAJCEgBYACgAAASOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACALMJggmCAAMJgwmDAAIJhAmIAAMJiQmLAAIJjAmPAAMJkAmQAAIJkQmmAAMJvgm+AAMJvwm/AAIJwAnAAAMJwQnBAAIJwgnFAAMJ2QnZAAMJ2gnaAAIJ3AndAAMJ4AngACcJ9gn5AAMJ+gn7AAIJ/An9AAMJ/gn+AAIJ/woMAAMKDQoNAAIKDgoOAAMKEwoTAAQKFAoUAAkKFgoWABIKGAoYACYKGQoZABwKGgoaACUKGwobACEKHgoeACUKHwofABoKIAogAAwKIQohABAKIgoiACIKIwojAAsKJAokAAoKJQolAAUKJwonABQKKAooABgKKQopAAYKKgoqAAcKKworABMKLAosACAKLgouABEKLwovABUKMAowAA0KMQoxABAKMgoyAB0KMwozABkKNAo0ABYKNwo3AAQKOAo4AAkKOgo6ABIKPAo8ACYKPQo9ABwKPgo+ACUKPwo/ACEKQgpCACUKQwpDABoKRApEAAwKRQpFABAKRgpGACIKRwpHAAsKSApIAAoKSQpJAAUKSwpLABQKTApMABgKTQpNAAYKTgpOAAcKTwpPABMKUApQACAKUgpSABEKUwpTABUKVApUAA0KVQpVABAKVgpWAB0KVwpXAAMKWApYAAIKWQpdAAMKXgpgAAIKYQpkAAMKZQplAAIKZgp2AAMKdwp3AAIKeAp4AAMKeQp6AAIKewp7AAMKfAp8AAIKfQqBAAMKggqEAAIKhQqIAAMKiQqJAAIKigqaAAMKmwqbAAIKnAqcAAMKnwqfAAgKoAqgAAkKowqjABIKqAqoACEKrgquACUKsAqwAB8KsQqxAA4KsgqyABAKswqzACIKtAq0AAsKtQq1AA8Ktgq2AAUKuAq4ABQKuQq5ACUKugq6ABcKuwq7AAcKvAq8ABMKvQq9ACAKvwq/ABEKwArAACQKwQrBAA0KwgrDACUKxQrFACUKyArIAAgKyQrJAAkKzArMABIK0QrRACEK1wrXACUK2QrZAB8K2graAA4K2wrbABAK3ArcACIK3QrdAAsK3greAA8K3wrfAAUK4QrhABQK4griACUK4wrjABcK5ArkAAcK5QrlABMK5grmACAK6AroABEK6QrpACQK6grqAA0K6wrsACULMQsxAAMLMgszAAILNAs0AAMLNgs2AAMLOAs4AAILOQs5AAMLOgs6AAILOws8AAMLPgs+AAILPws/AAMLQAtAAAILQQtBAAMLQgtCABsLQwtFAAMLRgtGAAILRwtHAAMLSAtJAAILSgtKAAMLSwtLAAILTAtMAAMLTQtNAB4LTgtOAAMLTwtPAAILUAtQAAMLUQtRACMLUgtSAAML
UwtTACMLVAtVAAILVgtbAAMLXAtdAAILXgtjAAMLZAtkAAILZQtlAAMLZgtmABULZwtnAAMLaAtoABULaQtpAAELxgvGABkASACSAJwApgCyAL4AygDWAOIA7gD6AQYBEgEeASoBNgFCAU4BWgFmAXIBfgGKAZYBogGuAboBxgHSAd4B6gH2AgICDgIaAiYCMgI+AkoCWAJmAnQCggKQAp4CrAK6AsgC1gLkAvIDAAMOAxwDKgM4A0YDVANiA3ADfgOMA5oDqAO2A8QD0gPgA+4D/AQKBBgEJgACAAEAAgAAAEAAAgABAAMAAABAAAMAAQAEAAMAAAAdAAMAAQAFAAMAAAAeAAMAAQAGAAMAAAAfAAMAAQAHAAMAAAAgAAMAAQAIAAMAAAAhAAMAAQAJAAMAAAAiAAMAAQAKAAMAAAAjAAMAAQALAAMAAAAkAAMAAQAMAAMAAAAlAAMAAQANAAMAAAAmAAMAAQAOAAMAAAAnAAMAAQAPAAMAAAAoAAMAAQAQAAMAAAApAAMAAQARAAMAAAAqAAMAAQASAAMAAAArAAMAAQATAAMAAAAsAAMAAQAUAAMAAAAtAAMAAQAVAAMAAAAuAAMAAQAWAAMAAAAvAAMAAQAXAAMAAAAwAAMAAQAYAAMAAAAxAAMAAQAZAAMAAAAyAAMAAQAaAAMAAAAzAAMAAQAbAAMAAAA0AAMAAQAcAAMAAAA1AAMAAQAdAAMAAAA2AAMAAQAeAAMAAAA3AAMAAQAfAAMAAAA4AAMAAQAgAAMAAAA5AAMAAQAhAAMAAAA6AAMAAQAiAAMAAAA7AAMAAQAjAAMAAAA8AAMAAQAkAAMAAAA9AAMAAQAlAAMAAAA+AAMAAQAmAAMAAAA/AAQAAQAEACcAAwAAAB0ABAABAAUAJwADAAAAHgAEAAEABgAnAAMAAAAfAAQAAQAHACcAAwAAACAABAABAAgAJwADAAAAIQAEAAEACQAnAAMAAAAiAAQAAQAKACcAAwAAACMABAABAAsAJwADAAAAJAAEAAEADAAnAAMAAAAlAAQAAQANACcAAwAAACYABAABAA4AJwADAAAAJwAEAAEADwAnAAMAAAAoAAQAAQAQACcAAwAAACkABAABABEAJwADAAAAKgAEAAEAEgAnAAMAAAArAAQAAQATACcAAwAAACwABAABABQAJwADAAAALQAEAAEAFQAnAAMAAAAuAAQAAQAWACcAAwAAAC8ABAABABcAJwADAAAAMAAEAAEAGAAnAAMAAAAxAAQAAQAZACcAAwAAADIABAABABoAJwADAAAAMwAEAAEAGwAnAAMAAAA0AAQAAQAcACcAAwAAADUABAABAB0AJwADAAAANgAEAAEAHgAnAAMAAAA3AAQAAQAfACcAAwAAADgABAABACAAJwADAAAAOQAEAAEAIQAnAAMAAAA6AAQAAQAiACcAAwAAADsABAABACMAJwADAAAAPAAEAAEAJAAnAAMAAAA9AAQAAQAlACcAAwAAAD4ABAABACYAJwADAAAAPwAFAAAAAQAIAAI5SAAcAAoAAAggAAAAAAAAAAAAAAAAAAAAAAACAHUJggmCAAUJhAmFAAYJhgmIAAcJjAmNAAQJjgmOAAcJjwmPAAQJkQmRAAYJkgmSAAcJkwmTAAQJlAmUAAcJlQmXAAYJmAmYAAUJmQmZAAYJmgmaAAgJmwmcAAYJnQmeAAMJnwmhAAgJogmiAAYJowmjAAgJpAmkAAYJpQmlAAgJpgmmAAUJvgm+AAUJwAnAAAYJwgnCAAcJwwnDAAQJxAnEAAUJxQnFAAYJ2QnZAAYJ3AncAAcJ3QndAAYJ4AngAAkJ9gn2AAYJ9wn5AAcJ/An9AAQJ/wn/AAYKAAoAAAcKAQoBAAQKAgoCAAcKAwoEAAYKBQoFAAgKBgoGAAYKBwoHAAgKCAoIAAYKCQoJAAgKCgoKAAYKCwoLAAgKDAoMAAUKDgoOAAcKEwoTAAIKNwo3AAIKVwpXAAUKWQpaAAYKWwpdAAcKYQpiAAQKYwpjAAcKZApkAAQKZgpmAAYKZwpnAAcKaApoAAQKaQppAAcKagprAAYKbApsAAUKbQptAAYKbgpuAAgKbwpwAAYKcQpxAAMKcgpzAAgKdAp0AAYKdQp1AAgKdgp2AAYKeAp4AAYKewp7AAUKfQp+AAYKfwqBAAcKhQqGAAQKhwqHAAcKiAqIAAQKigqKAAYKiwqLAAcKjAqMAAQKjQqNAAcKjgqPAAYKkAqQAAUKkQqRAAYKkgqSAAgKkwqUAAYKlQqVAAMKlgqXAAgKmAqYAAYKmQqZAAgKmgqaAAYKnAqcAAYLMQsxAAcLNAs0AAQLNgs2AAQLOQs5AAQLOws8AAcLPws/AAQLQQtBAAgLQwtDAAgLRAtEAAQLRQtFAAYLRwtHAAYLSgtKAAQLTAtMAAgLTgtOAAYLUAtQAAgLUgtSAAgLVgtZAAYLWgtaAAgLWwtbAAcLXgtfAAgLYAtjAAcLZQtlAAgLZwtnAAgLaQtpAAEABQAAAAEACAACNmIAHAAKAAAFOgAAAAAAAAAAAAAAAAAAAAAAAgBwCYIJggAGCYQJhAAGCYUJhgAHCYcJhwAICYgJiAAHCYwJjQAECY4JjgAHCY8JjwAECZEJkQAGCZIJkgAICZMJkwAECZQJlAAHCZUJmQAGCZsJmwAHCZwJnAAGCZ0JngADCZ8JoQAICaIJogAGCaMJowAICaQJpAAGCaUJpQAICaYJpgAFCb4JvgAGCcAJwAAGCcIJwgAHCcMJwwAECcQJxQAGCdkJ2QAGCdwJ3AAHCd0J3QAGCeAJ4AAJCfYJ9wAHCfgJ+AAICfkJ+QAHCfwJ/QAECf8J/wAGCgAKAAAICgEKAQAECgIKAgAHCgMKBAAGCgYKBgAHCgcKBwAICggKCAAGCgkKCQAICgoKCgAGCgsKCwAICgwKDAAFCg4KDgAICiUKJQACCkkKSQACClcKVwAGClkKWQAGCloKWwAHClwKXAAICl0KXQAHCmEKYgAECmMKYwAHCmQKZAAECmYKZgAGCmcKZwAICmgKaAAECmkKaQAHCmoKbQAGCm8KbwAHCnAKcAAGCnEKcQADCnMKcwAICnQKdAAGCnYKdgAGCngKeAAGCnsKewAGCn0KfQAGCn4KfwAHCoAKgAAICoEKgQAHCoUKhgAECocKhwAHCogKiAAECooKigAGCosKiwAICowKjAAECo0KjQAHCo4KkQAGCpMKkwAHCpQKlAAGCpUKlQADCpcKlwAICpgKmAAGCpoKmgAGCpwKnAAGCrYKtgACCt8K3wACCzELMQAICzQLNAAECzYLNgAECzkLOQAECzsLPAAHCz8LPwAEC0ELQQAIC0MLQwAIC0QLRAAEC0ULRQAGC0cLRwAGC0oLSgAFC0wLTAAIC04LTgAGC1YLWQAHC1sLWwAHC2ALYwAIC2ULZQAIC2cLZwAIC2kLaQABAAUAAAABAAgAAjOaABwACgAAAnIAAAAAAAAAAAAAAAAAAAAAAAIAYwmCCYIABgmECYQABgmFCYYABwmHCYgACAm
MCY0ABAmOCY4ABwmPCY8ABQmRCZEABgmSCZIACAmTCZMABAmUCZQABwmVCZgABgmZCZkABwmbCZwABwmdCZ4AAwmiCaIABgmkCaQABwmlCaUACAmmCaYABgm+Cb4ABgnACcAABgnCCcIABwnDCcMABQnECcQABgnFCcUABwnZCdkABgncCd0ABwngCeAACQn2CfcABwn4CfkACAn8Cf0ABAn/Cf8ABgoACgAACAoBCgEABAoCCgIABwoDCgMABgoECgQABwoGCgYABwoICggABgoKCgoABwoLCgsACAoMCgwABgoOCg4ACAopCikAAgpNCk0AAgpXClcABgpZClkABgpaClsABwpcCl0ACAphCmIABApjCmMABwpkCmQABQpmCmYABgpnCmcACApoCmgABAppCmkABwpqCmwABgptCm0ABwpvCnAABwpxCnEAAwp0CnQABgp2CnYABwp4CngABgp7CnsABgp9Cn0ABgp+Cn8ABwqACoEACAqFCoYABAqHCocABwqICogABQqKCooABgqLCosACAqMCowABAqNCo0ABwqOCpAABgqRCpEABwqTCpQABwqVCpUAAwqYCpgABgqaCpoABwqcCpwABgsxCzEACAs0CzQABAs2CzYABAs5CzkABAs7CzwABws/Cz8ABQtDC0MACAtEC0QABAtFC0UABwtHC0cABgtKC0oABQtOC04ABgtWC1kABwtbC1sABwtgC2MACAtlC2UACAtnC2cACAtpC2kAAQAMABoAJgAyAD4ASgBWAGIAcAB+AIwAmgCoAAMAAQACAAMAAABGAAMAAQACAAQAAABHAAMAAQACAAUAAABIAAMAAQACAAYAAABJAAMAAQACAAcAAABKAAMAAQACAAgAAABLAAQAAQACAAkAAwAAAEYABAABAAIACQAEAAAARwAEAAEAAgAJAAUAAABIAAQAAQACAAkABgAAAEkABAABAAIACQAHAAAASgAEAAEAAgAJAAgAAABLAAUAAAABAAgAAjBqABoACQAAAnYAAAAAAAAAAAAAAAAAAAACAGQJggmCAAUJhAmFAAYJhgmIAAcJjAmNAAMJjgmOAAcJjwmPAAQJkQmRAAYJkgmSAAcJkwmTAAQJlAmUAAcJlQmWAAUJlwmXAAYJmAmYAAUJmQmZAAYJmwmcAAYJnQmeAAMJogmiAAUJpAmkAAYJpgmmAAUJvgm+AAUJwAnAAAYJwgnCAAcJwwnDAAQJxAnEAAUJxQnFAAYJ2QnZAAYJ3AncAAcJ3QndAAYJ4AngAAgJ9gn2AAYJ9wn5AAcJ/An9AAMJ/wn/AAYKAAoAAAcKAQoBAAQKAgoCAAcKAwoEAAYKBgoGAAYKCAoIAAUKCgoKAAYKDAoMAAUKDgoOAAcKKgoqAAIKTgpOAAIKVwpXAAUKWQpaAAYKWwpdAAcKYQpiAAMKYwpjAAcKZApkAAQKZgpmAAUKZwpnAAcKaApoAAQKaQppAAcKagpqAAUKawprAAYKbApsAAUKbQptAAYKbwpwAAYKcQpxAAMKdAp0AAUKdgp2AAYKeAp4AAUKewp7AAUKfQp+AAYKfwqBAAcKhQqGAAMKhwqHAAcKiAqIAAQKigqKAAUKiwqLAAcKjAqMAAQKjQqNAAcKjgqOAAUKjwqPAAYKkAqQAAUKkQqRAAYKkwqUAAYKlQqVAAMKmAqYAAUKmgqaAAYKnAqcAAUKuwq7AAIK5ArkAAILMQsxAAcLNAs0AAMLNgs2AAMLOQs5AAMLOws8AAcLPws/AAQLQwtDAAcLRAtEAAQLRQtFAAYLRwtHAAYLSgtKAAQLTgtOAAYLVgtZAAYLWwtbAAcLYAtjAAcLaQtpAAEABQ1UDWANbA14DYQABQAAAAEACAACLeAAGgAJAAANKgAAAAAAAAAAAAAAAAAAAAIAXgmCCYIABQmECYUABgmGCYgABwmMCY0ABAmOCY4ABwmPCY8ABAmRCZEABgmSCZIABwmTCZMABAmUCZQABwmVCZcABgmYCZgABQmZCZkABgmbCZwABgmdCZ4AAwmiCaIABgmkCaQABgmmCaYABQm+Cb4ABQnACcAABgnCCcIABwnDCcMABAnECcQABQnFCcUABgnZCdkABgncCdwABwndCd0ABgngCeAACAn2CfYABgn3CfkABwn8Cf0ABAn/Cf8ABgoACgAABwoBCgEABAoCCgIABwoDCgQABgoGCgYABgoICggABgoKCgoABgoMCgwABQoOCg4ABwpXClcABQpZCloABgpbCl0ABwphCmIABApjCmMABwpkCmQABApmCmYABQpnCmcABwpoCmgABAppCmkABwpqCmsABgpsCmwABQptCm0ABgpvCnAABgpxCnEAAwp0CnQABgp2CnYABgp4CngABgp7CnsABQp9Cn4ABgp/CoEABwqFCoYABAqHCocABwqICogABAqKCooABQqLCosABwqMCowABAqNCo0ABwqOCo8ABgqQCpAABQqRCpEABgqTCpQABgqVCpUAAwqYCpgABgqaCpoABgqcCpwABgqfCp8AAgrICsgAAgsxCzEABws0CzQABAs2CzYABAs5CzkABAs7CzwABws/Cz8ABAtEC0QABAtFC0UABgtHC0cABgtKC0oABAtOC04ABgtWC1kABgtbC1sABwtgC2MABwtpC2kAAQAFAAAAAQAIAAIrhgAaAAkAAArQAAAAAAAAAAAAAAAAAAAAAgBXCYIJggAGCYQJhAAGCYUJiAAHCYwJjQAECY4JjgAHCY8JjwAECZEJkQAGCZMJkwAECZQJlAAHCZUJmQAGCZsJmwAHCZwJnAAGCZ0JngADCaIJogAGCaQJpAAGCaYJpgAFCb4JvgAGCcAJwAAGCcIJwgAHCcMJwwAECcQJxQAGCdkJ2QAGCdwJ3AAHCd0J3QAGCeAJ4AAICfYJ+QAHCfwJ/QAECf8J/wAGCgEKAQAECgIKAgAHCgMKBAAGCgYKBgAHCggKCAAGCgoKCgAGCgwKDAAFCg4KDgAHChQKFAACCjgKOAACClcKVwAGClkKWQAGCloKXQAHCmEKYgAECmMKYwAHCmQKZAAECmYKZgAGCmgKaAAECmkKaQAHCmoKbQAGCm8KbwAHCnAKcAAGCnEKcQADCnQKdAAGCnYKdgAGCngKeAAGCnsKewAGCn0KfQAGCn4KgQAHCoUKhgAECocKhwAHCogKiAAECooKigAGCowKjAAECo0KjQAHCo4KkQAGCpMKkwAHCpQKlAAGCpUKlQADCpgKmAAGCpoKmgAGCpwKnAAGCqAKoAACCskKyQACCzQLNAAECzYLNgAECzkLOQAECzsLPAAHCz8LPwAEC0QLRAAEC0ULRQAGC0cLRwAGC0oLSgAEC04LTgAGC1YLVwAGC1gLWQAHC1sLWwAHC2ALYwAHC2kLaQABAAUAAAABAAgAAilWABoACQAACKAAAAAAAAAAAAAAAAAAAAACAFoJggmCAAUJhAmEAAYJhQmGAAcJiAmIAAYJjAmMAAMJjQmNAAQJjgmOAAcJjwmPAAQJkQmRAAUJkwmTAAMJlAmUAA
cJlQmWAAYJlwmYAAUJmQmZAAYJmwmbAAcJnAmcAAUJnQmeAAMJogmiAAUJpAmkAAYJpQmlAAcJpgmmAAQJvgm+AAYJwAnAAAYJwgnCAAcJwwnDAAQJxAnFAAYJ2QnZAAYJ3AncAAcJ3QndAAYJ4AngAAgJ9gn3AAcJ+Qn5AAcJ/An9AAQJ/wn/AAYKAQoBAAQKAgoCAAcKAwoEAAYKBgoGAAcKCAoIAAYKCgoKAAYKDAoMAAUKJAokAAIKSApIAAIKVwpXAAYKWQpZAAYKWgpbAAcKXQpdAAcKYQpiAAQKYwpjAAcKZApkAAQKZgpmAAYKaApoAAMKaQppAAcKagptAAYKbwpvAAcKcApwAAYKcQpxAAMKdAp0AAYKdgp2AAYKeAp4AAYKewp7AAYKfQp9AAYKfgp/AAcKgQqBAAcKhQqGAAQKhwqHAAcKiAqIAAQKigqKAAYKjAqMAAQKjQqNAAcKjgqRAAYKkwqTAAcKlAqUAAYKlQqVAAMKmAqYAAYKmgqaAAYKnAqcAAYLNAs0AAQLNgs2AAQLOQs5AAQLOws8AAcLPws/AAQLRAtEAAQLRQtFAAYLRwtHAAMLSgtKAAULTgtOAAYLVgtZAAcLWwtbAAcLaQtpAAEABQAAAAEACAACJxQAGgAJAAAGXgAAAAAAAAAAAAAAAAAAAAIAVgmCCYIABgmECYQABgmFCYYABwmICYgABwmMCY0ABAmOCY4ABwmPCY8ABAmRCZEABgmTCZMABAmUCZQABwmVCZkABgmbCZwABwmdCZ4AAwmiCaIABgmkCaQABgmmCaYABgm+Cb4ABgnACcAABgnCCcIABwnDCcMABAnECcQABgnFCcUABwnZCdkABgncCdwABwndCd0ABgngCeAACAn2CfcABwn5CfkABwn8Cf0ABAn/Cf8ABgoBCgEABAoCCgIABwoDCgQABgoGCgYABwoICggABgoKCgoABgoMCgwABgojCiMAAgpHCkcAAgpXClcABgpZClkABgpaClsABwpdCl0ABwphCmIABApjCmMABwpkCmQABApmCmYABgpoCmgABAppCmkABwpqCm0ABgpvCnAABwpxCnEAAwp0CnQABgp2CnYABgp4CngABgp7CnsABgp9Cn0ABgp+Cn8ABwqBCoEABwqFCoYABAqHCocABwqICogABAqKCooABgqMCowABAqNCo0ABwqOCpEABgqTCpQABwqVCpUAAwqYCpgABgqaCpoABgqcCpwABgq0CrQAAgrdCt0AAgs0CzQABAs2CzYABAs5CzkABAs7CzwABws/Cz8ABAtEC0QABAtFC0UABgtHC0cABgtKC0oABQtOC04ABgtWC1kABwtbC1sABwtpC2kAAQAFAAAAAQAIAAIk6gAaAAkAAAQ0AAAAAAAAAAAAAAAAAAAAAgBUCYIJggAFCYQJhAAGCYUJhgAHCYwJjQAECY4JjgAHCY8JjwAFCZEJkQAGCZMJkwAECZQJlAAHCZUJlgAGCZcJmAAFCZkJmQAGCZsJmwAHCZwJnAAGCZ0JngADCZ8JnwAHCaIJogAFCaQJpQAHCaYJpgAGCb4JvgAGCcAJwAAGCcIJwgAHCcMJwwAFCcQJxAAGCcUJxQAHCdkJ2QAGCdwJ3AAHCd0J3QAGCeAJ4AAICfYJ9wAHCfwJ/QAECf8J/wAGCgEKAQAECgIKAgAHCgMKBAAGCgYKBgAHCggKCAAGCgoKCgAHCgwKDAAGCiAKIAACCkQKRAACClcKVwAGClkKWQAGCloKWwAHCmEKYgAECmMKYwAHCmQKZAAFCmYKZgAGCmgKaAAECmkKaQAHCmoKbQAGCm8KcAAHCnEKcQADCnQKdAAGCnYKdgAHCngKeAAGCnsKewAGCn0KfQAGCn4KfwAHCoUKhgAECocKhwAHCogKiAAFCooKigAGCowKjAAECo0KjQAHCo4KkQAGCpMKlAAHCpUKlQADCpgKmAAGCpoKmgAHCpwKnAAGCzQLNAAECzYLNgAECzkLOQAECzsLPAAHCz8LPwAFC0QLRAAEC0ULRQAHC0cLRwAGC0oLSgAFC04LTgAGC1YLWQAHC1sLWwAHC2kLaQABAAUAAAABAAgAAiLMABoACQAAAhYAAAAAAAAAAAAAAAAAAAACAFQJggmCAAUJhAmEAAYJhQmFAAcJjAmNAAQJjwmPAAUJkQmRAAcJkwmTAAUJlAmUAAcJlQmXAAYJmAmYAAUJmQmZAAcJmwmbAAcJnAmcAAYJnQmeAAMJogmiAAYJpAmkAAYJpgmmAAYJvgm+AAYJwAnAAAYJwwnDAAUJxAnEAAYJxQnFAAcJ2QnZAAYJ3QndAAcJ4AngAAgJ9gn2AAcJ/An9AAQJ/wn/AAcKAQoBAAUKAgoEAAcKBgoGAAcKCAoIAAYKCgoKAAcKDAoMAAYKMAowAAIKVApUAAIKVwpXAAYKWQpZAAYKWgpaAAcKYQpiAAQKZApkAAUKZgpmAAYKaApoAAUKaQppAAcKagpqAAYKawprAAcKbApsAAYKbQptAAcKbwpwAAcKcQpxAAMKdAp0AAYKdgp2AAcKeAp4AAYKewp7AAYKfQp9AAYKfgp+AAcKhQqGAAQKiAqIAAUKigqKAAYKjAqMAAUKjQqNAAcKjgqOAAYKjwqPAAcKkAqQAAYKkQqRAAcKkwqUAAcKlQqVAAMKmAqYAAYKmgqaAAcKnAqcAAYKwQrBAAIK6grqAAILNAs0AAQLNgs2AAQLOQs5AAQLPws/AAULRAtEAAULRQtFAAcLRwtHAAYLSgtKAAULTgtOAAcLVgtZAAcLWwtbAAcLaQtpAAEACgAWACIALgA6AEYAUgBgAG4AfACKAAMAAQACAAMAAABHAAMAAQACAAQAAABIAAMAAQACAAUAAABJAAMAAQACAAYAAABKAAMAAQACAAcAAABLAAQAAQACAAgAAwAAAEcABAABAAIACAAEAAAASAAEAAEAAgAIAAUAAABJAAQAAQACAAgABgAAAEoABAABAAIACAAHAAAASwAFAAAAAQAIAAIgFgAYAAgAAAHeAAAAAAAAAAAAAAAAAAIASwmCCYIABQmECYUABgmMCY0ABAmPCY8ABAmRCZEABgmTCZMABAmVCZYABQmXCZcABgmYCZgABQmZCZkABgmbCZwABgmdCZ4AAwmiCaIABQmkCaQABgmmCaYABQm+Cb4ABQnACcAABgnDCcMABAnECcQABQnFCcUABgnZCdkABgndCd0ABgngCeAABwn2CfYABgn8Cf0ABAn/Cf8ABgoBCgEABAoDCgQABgoGCgYABgoICggABQoKCgoABgoMCgwABQpXClcABQpZCloABgphCmIABApkCmQABApmCmYABQpoCmgABApqCmoABQprCmsABgpsCmwABQptCm0ABgpvCnAABgpxCnEAAwp0CnQABQp2CnYABgp4CngABQp7CnsABQp9Cn4ABgqFCoYABAqICogABAqKCooABQqMCowABAqOCo4ABQqPCo8ABgqQCpAABQqRCpEABgqTCpQABgqVCpUAAwqYCpgABQqaCpoAB
gqcCpwABQqxCrEAAgraCtoAAgs0CzQABAs2CzYABAs5CzkABAs/Cz8ABAtEC0QABAtFC0UABgtHC0cABgtKC0oABAtOC04ABgtWC1kABgtpC2kAAQAEDrAOvA7IDtQABQAAAAEACAACHiYAGAAIAAAOjAAAAAAAAAAAAAAAAAACAEsJggmCAAUJhAmFAAYJjAmNAAQJjwmPAAQJkQmRAAYJkwmTAAQJlQmWAAUJlwmXAAYJmAmYAAUJmQmZAAYJmwmcAAYJnQmeAAMJogmiAAUJpAmkAAYJpgmmAAUJvgm+AAUJwAnAAAYJwwnDAAQJxAnEAAUJxQnFAAYJ2QnZAAYJ3QndAAYJ4AngAAcJ9gn2AAYJ/An9AAQJ/wn/AAYKAQoBAAQKAwoEAAYKBgoGAAYKCAoIAAUKCgoKAAYKDAoMAAUKVwpXAAUKWQpaAAYKYQpiAAQKZApkAAQKZgpmAAUKaApoAAQKagpqAAUKawprAAYKbApsAAUKbQptAAYKbwpwAAYKcQpxAAMKdAp0AAUKdgp2AAYKeAp4AAUKewp7AAUKfQp+AAYKhQqGAAQKiAqIAAQKigqKAAUKjAqMAAQKjgqOAAUKjwqPAAYKkAqQAAUKkQqRAAYKkwqUAAYKlQqVAAMKmAqYAAUKmgqaAAYKnAqcAAUKtQq1AAIK3greAAILNAs0AAQLNgs2AAQLOQs5AAQLPws/AAQLRAtEAAQLRQtFAAYLRwtHAAYLSgtKAAQLTgtOAAYLVgtZAAYLaQtpAAEABQAAAAEACAACHEAAGAAIAAAMpgAAAAAAAAAAAAAAAAACAEwJggmCAAUJhAmFAAYJjAmNAAQJjwmPAAQJkQmRAAYJkwmTAAQJlQmXAAYJmAmYAAUJmQmZAAYJmwmcAAYJnQmeAAMJogmiAAYJpAmkAAYJpgmmAAUJvgm+AAUJwAnAAAYJwwnDAAQJxAnEAAUJxQnFAAYJ2QnZAAYJ3QndAAYJ4AngAAcJ9gn2AAYJ/An9AAQJ/wn/AAYKAQoBAAQKAwoEAAYKBgoGAAYKCAoIAAYKCgoKAAYKDAoMAAUKIQohAAIKMQoxAAIKRQpFAAIKVQpVAAIKVwpXAAUKWQpaAAYKYQpiAAQKZApkAAQKZgpmAAUKaApoAAQKagprAAYKbApsAAUKbQptAAYKbwpwAAYKcQpxAAMKdAp0AAYKdgp2AAYKeAp4AAYKewp7AAUKfQp+AAYKhQqGAAQKiAqIAAQKigqKAAUKjAqMAAQKjgqPAAYKkAqQAAUKkQqRAAYKkwqUAAYKlQqVAAMKmAqYAAYKmgqaAAYKnAqcAAYKsgqyAAIK2wrbAAILNAs0AAQLNgs2AAQLOQs5AAQLPws/AAQLRAtEAAQLRQtFAAYLRwtHAAYLSgtKAAQLTgtOAAYLVgtZAAYLaQtpAAEABQAAAAEACAACGlQAGAAIAAAKugAAAAAAAAAAAAAAAAACAEoJggmCAAUJhAmFAAYJjAmNAAQJjwmPAAQJkQmRAAYJkwmTAAQJlQmXAAYJmAmYAAUJmQmZAAYJmwmcAAYJnQmeAAMJogmiAAUJpAmkAAYJpgmmAAUJvgm+AAUJwAnAAAYJwwnDAAQJxAnEAAUJxQnFAAYJ2QnZAAYJ3QndAAYJ4AngAAcJ9gn2AAYJ/An9AAQJ/wn/AAYKAQoBAAQKAwoEAAYKBgoGAAYKCAoIAAYKCgoKAAYKDAoMAAUKLgouAAIKUgpSAAIKVwpXAAUKWQpaAAYKYQpiAAQKZApkAAQKZgpmAAYKaApoAAQKagprAAYKbApsAAUKbQptAAYKbwpwAAYKcQpxAAMKdAp0AAYKdgp2AAYKeAp4AAYKewp7AAUKfQp+AAYKhQqGAAQKiAqIAAQKigqKAAYKjAqMAAQKjgqPAAYKkAqQAAUKkQqRAAYKkwqUAAYKlQqVAAMKmAqYAAYKmgqaAAYKnAqcAAYKvwq/AAIK6AroAAILNAs0AAQLNgs2AAQLOQs5AAQLPws/AAQLRAtEAAQLRQtFAAYLRwtHAAYLSgtKAAQLTgtOAAYLVgtZAAYLaQtpAAEABQAAAAEACAACGHQAGAAIAAAI2gAAAAAAAAAAAAAAAAACAC0JggmCAAUJhAmFAAYJjAmNAAQJjwmPAAQJkQmRAAYJkwmTAAQJlQmZAAYJnAmcAAYJnQmeAAMJogmiAAYJpAmkAAYJpgmmAAUJvgm+AAUJwAnAAAYJwwnDAAQJxAnFAAYJ2QnZAAYJ3QndAAYJ4AngAAcJ9gn2AAYJ/An9AAQJ/wn/AAYKAQoBAAQKAwoEAAYKCAoIAAYKCgoKAAYKDAoMAAUKFgoWAAIKOgo6AAIKVwpXAAUKWQpaAAYKYQpiAAQKZApkAAQKZgpmAAYKaApoAAQKagptAAYKcApwAAYKcQpxAAMKdAp0AAYKdgp2AAYKeAp4AAYKlQqVAAMKowqjAAIKzArMAAILaQtpAAEABQAAAAEACAACF0IAGAAIAAAHqAAAAAAAAAAAAAAAAAACAEIJggmCAAUJhAmFAAYJjAmNAAQJjwmPAAQJkQmRAAYJkwmTAAQJlQmZAAYJnAmcAAYJnQmeAAMJogmiAAYJpAmkAAYJpgmmAAUJvgm+AAUJwAnAAAYJwwnDAAQJxAnFAAYJ2QnZAAYJ3QndAAYJ4AngAAcJ9gn2AAYJ/An9AAQJ/wn/AAYKAQoBAAQKAwoEAAYKCAoIAAYKCgoKAAYKDAoMAAUKKworAAIKTwpPAAIKVwpXAAUKWQpaAAYKYQpiAAQKZApkAAQKZgpmAAYKaApoAAQKagptAAYKcApwAAYKcQpxAAMKdAp0AAYKdgp2AAYKeAp4AAYKewp7AAUKfQp+AAYKhQqGAAQKiAqIAAQKigqKAAYKjAqMAAQKjgqRAAYKlAqUAAYKlQqVAAMKmAqYAAYKmgqaAAYKnAqcAAYKvAq8AAIK5QrlAAILNAs0AAQLNgs2AAQLOQs5AAQLPws/AAQLRAtEAAQLRQtFAAYLRwtHAAYLSgtKAAQLTgtOAAYLVgtXAAYLaQtpAAEABQAAAAEACAACFZIAGAAIAAAF+AAAAAAAAAAAAAAAAAACAEMJggmCAAYJhAmEAAYJjAmNAAQJjwmPAAQJkQmRAAUJkwmTAAMJlQmYAAYJmQmZAAUJmwmbAAYJnAmcAAUJnQmeAAMJogmiAAUJpAmkAAYJpgmmAAUJvgm+AAYJwAnAAAYJwwnDAAQJxAnFAAYJ2QnZAAYJ3QndAAYJ4AngAAcJ/An9AAQJ/wn/AAYKAQoBAAQKAwoEAAYKCAoIAAYKCgoKAAYKDAoMAAUKJwonAAIKSwpLAAIKVwpXAAYKWQpZAAYKYQpiAAQKZApkAAQKZgpmAAYKaApoAAQKagptAAYKcApwAAYKcQpxAAMKdAp0AAYKdgp2AAYKeAp4AAYKewp7AAYKfQp9AAYKhQqGAAQKiAqIAAQKigqKAAYKjAqMAAQKjgqRAAYKlAqUAAYKlQqVAAMKmAqYAAYKmgqaAAYKnAqcAAYKuAq4AAIK4QrhAAILNAs0
AAQLNgs2AAQLOQs5AAQLPws/AAQLRAtEAAQLRQtFAAYLRwtHAAYLSgtKAAQLTgtOAAYLVgtXAAYLaQtpAAEABQAAAAEACAACE9wAGAAIAAAEQgAAAAAAAAAAAAAAAAACAD0JggmCAAYJhAmEAAYJjAmNAAQJjwmPAAQJkQmRAAYJkwmTAAQJlQmZAAYJnQmeAAMJogmiAAYJpAmkAAYJpgmmAAYJvgm+AAYJwAnAAAYJwwnDAAQJxAnEAAYJ2QnZAAYJ3QndAAYJ4AngAAcJ/An9AAQJ/wn/AAYKAQoBAAQKAwoEAAYKCAoIAAYKCgoKAAYKDAoMAAYKLwovAAIKUwpTAAIKVwpXAAYKWQpZAAYKYQpiAAQKZApkAAQKZgpmAAYKaApoAAQKagptAAYKcQpxAAMKdAp0AAYKdgp2AAYKeAp4AAYKewp7AAYKfQp9AAYKhQqGAAQKiAqIAAQKigqKAAYKjAqMAAQKjgqRAAYKlQqVAAMKmAqYAAYKmgqaAAYKnAqcAAYLNAs0AAQLNgs2AAQLOQs5AAQLPws/AAQLRAtEAAQLRQtFAAYLRwtHAAYLSgtKAAULTgtOAAYLZgtmAAILaAtoAAILaQtpAAEABQAAAAEACAACEkoAGAAIAAACsAAAAAAAAAAAAAAAAAACADUJggmCAAYJhAmEAAYJjAmNAAQJjwmPAAUJkQmRAAYJkwmTAAQJlQmZAAYJnQmeAAMJogmiAAYJpgmmAAYJvgm+AAYJwAnAAAYJwwnDAAUJxAnEAAYJ2QnZAAYJ3QndAAYJ4AngAAcJ/An9AAQJ/wn/AAYKAQoBAAQKAwoEAAYKCAoIAAYKDAoMAAYKNAo0AAIKVwpXAAYKWQpZAAYKYQpiAAQKZApkAAUKZgpmAAYKaApoAAQKagptAAYKcQpxAAMKdAp0AAYKeAp4AAYKewp7AAYKfQp9AAYKhQqGAAQKiAqIAAUKigqKAAYKjAqMAAQKjgqRAAYKlQqVAAMKmAqYAAYKnAqcAAYLNAs0AAQLNgs2AAQLOQs5AAQLPws/AAULRAtEAAQLRwtHAAYLSgtKAAULTgtOAAYLaQtpAAEABQAAAAEACAACEOgAGAAIAAABTgAAAAAAAAAAAAAAAAACADMJggmCAAYJhAmEAAYJjAmNAAQJjwmPAAUJkwmTAAUJlQmWAAYJmAmYAAYJnQmeAAMJogmiAAYJpgmmAAYJvgm+AAYJwAnAAAYJwwnDAAUJxAnEAAYJ2QnZAAYJ4AngAAcJ/An9AAQKAQoBAAUKCAoIAAYKDAoMAAYKVwpXAAYKWQpZAAYKYQpiAAQKZApkAAUKZgpmAAYKaApoAAUKagpqAAYKbApsAAYKcQpxAAMKdAp0AAYKeAp4AAYKewp7AAYKfQp9AAYKhQqGAAQKiAqIAAUKigqKAAYKjAqMAAUKjgqOAAYKkAqQAAYKlQqVAAMKmAqYAAYKnAqcAAYKugq6AAIK4wrjAAILNAs0AAQLNgs2AAQLOQs5AAQLPws/AAULRAtEAAULSgtKAAULaQtpAAEACAASAB4AKgA2AEIAUABeAGwAAwABAAIAAwAAAEgAAwABAAIABAAAAEkAAwABAAIABQAAAEoAAwABAAIABgAAAEsABAABAAIABwADAAAASAAEAAEAAgAHAAQAAABJAAQAAQACAAcABQAAAEoABAABAAIABwAGAAAASwAFAAAAAQAIAAIPGAAWAAcAAAJ6AAAAAAAAAAAAAAACACUJggmCAAUJjAmNAAQJjwmPAAQJkwmTAAQJmAmYAAUJnQmeAAMJpgmmAAUJvgm+AAUJwwnDAAQJxAnEAAUJ4AngAAYJ/An9AAQKAQoBAAQKDAoMAAUKKAooAAIKTApMAAIKVwpXAAUKYQpiAAQKZApkAAQKZgpmAAUKaApoAAQKbApsAAUKcQpxAAMKewp7AAUKhQqGAAQKiAqIAAQKigqKAAUKjAqMAAQKkAqQAAUKlQqVAAMLNAs0AAQLNgs2AAQLOQs5AAQLPws/AAQLRAtEAAQLSgtKAAQLaQtpAAEABQAAAAEACAACDhgAFgAHAAABegAAAAAAAAAAAAAAAgAcCYwJjQAECY8JjwAECZMJkwAECZ0JngADCaIJogAFCaYJpgAFCcMJwwAECeAJ4AAGCfwJ/QAECgEKAQAECgwKDAAFCjMKMwACCmEKYgAECmQKZAAECmgKaAAECnEKcQADCoUKhgAECogKiAAECowKjAAECpUKlQADCzQLNAAECzYLNgAECzkLOQAECz8LPwAEC0QLRAAEC0oLSgAFC2kLaQABC8YLxgACAAUAAAABAAgAAg1OABYABwAAALAAAAAAAAAAAAAAAAIAGQmMCY0ABAmPCY8ABAmTCZMABAmdCZ4AAwnDCcMABAngCeAABgn8Cf0ABAoBCgEABAofCh8AAgpDCkMAAgphCmIABApkCmQABApoCmgABApxCnEAAwqFCoYABAqICogABAqMCowABAqVCpUAAws0CzQABAs2CzYABAs5CzkABAs/Cz8ABAtEC0QABAtKC0oABQtpC2kAAQAGAA4AGgAmADIAQABOAAMAAQACAAMAAABJAAMAAQACAAQAAABKAAMAAQACAAUAAABLAAQAAQACAAYAAwAAAEkABAABAAIABgAEAAAASgAEAAEAAgAGAAUAAABLAAUAAAABAAgAAgw6ABQABgAABoQAAAAAAAAAAAACABkJjAmNAAQJjwmPAAQJkwmTAAQJnQmeAAMJogmiAAQJwwnDAAQJ4AngAAUJ/An9AAQKAQoBAAQKYQpiAAQKZApkAAQKaApoAAQKcQpxAAMKhQqGAAQKiAqIAAQKjAqMAAQKlQqVAAMLNAs0AAQLNgs2AAQLOQs5AAQLPws/AAQLQgtCAAILRAtEAAQLSgtKAAQLaQtpAAEABQAAAAEACAACC4QAFAAGAAAFzgAAAAAAAAAAAAIAGQmMCY0ABAmPCY8ABAmTCZMABAmdCZ4AAwnDCcMABAngCeAABQn8Cf0ABAoBCgEABAoZChkAAgo9Cj0AAgphCmIABApkCmQABApoCmgABApxCnEAAwqFCoYABAqICogABAqMCowABAqVCpUAAws0CzQABAs2CzYABAs5CzkABAs/Cz8ABAtEC0QABAtKC0oABAtpC2kAAQAFAAAAAQAIAAIKzgAUAAYAAAUYAAAAAAAAAAAAAgAZCYwJjQAECY8JjwAECZMJkwAECZ0JngADCcMJwwAECeAJ4AAFCfwJ/QAECgEKAQAECjIKMgACClYKVgACCmEKYgAECmQKZAAECmgKaAAECnEKcQADCoUKhgAECogKiAAECowKjAAECpUKlQADCzQLNAAECzYLNgAECzkLOQAECz8LPwAEC0QLRAAEC0oLSgAEC2kLaQABAAUAAAABAAgAAgoYABQABgAABGIAAAAAAAAAAAACABgJjAmNAAQJjwmPAAQJkwmTAAQJnQmeAAMJwwnDAAQJ4AngAAUJ/An9AAQKAQoBAAQKYQpiAAQKZApkAAQKaApoAAQKcQpxAAM
KhQqGAAQKiAqIAAQKjAqMAAQKlQqVAAMLNAs0AAQLNgs2AAQLOQs5AAQLPws/AAQLRAtEAAQLSgtKAAQLTQtNAAILaQtpAAEABQAAAAEACAACCWgAFAAGAAADsgAAAAAAAAAAAAIAGQmMCY0ABAmPCY8ABAmTCZMABAmdCZ4AAwnDCcMABAngCeAABQn8Cf0ABAoBCgEABAphCmIABApkCmQABApoCmgABApxCnEAAwqFCoYABAqICogABAqMCowABAqVCpUAAwqwCrAAAgrZCtkAAgs0CzQABAs2CzYABAs5CzkABAs/Cz8ABAtEC0QABAtKC0oABAtpC2kAAQAFAAAAAQAIAAIIsgAUAAYAAAL8AAAAAAAAAAAAAgAcCYwJjQADCY8JjwADCZMJkwADCZ0JngADCaYJpgAECcMJwwAECeAJ4AAFCfwJ/QAECgEKAQAECiwKLAACClAKUAACCmEKYgAECmQKZAAECmgKaAAECnEKcQADCoUKhgAECogKiAAECowKjAAECpUKlQADCr0KvQACCuYK5gACCzQLNAAECzYLNgAECzkLOQAECz8LPwAEC0QLRAAEC0oLSgAEC2kLaQABAAUAAAABAAgAAgfqABQABgAAAjQAAAAAAAAAAAACABoJjAmNAAQJjwmPAAQJkwmTAAQJnQmeAAMJwwnDAAQJ4AngAAUJ/An9AAQKAQoBAAQKGwobAAIKPwo/AAIKYQpiAAQKZApkAAQKaApoAAQKcQpxAAMKhQqGAAQKiAqIAAQKjAqMAAQKlQqVAAMKqAqoAAIK0QrRAAILNAs0AAQLNgs2AAQLOQs5AAQLPws/AAQLRAtEAAQLaQtpAAEABQAAAAEACAACBy4AFAAGAAABeAAAAAAAAAAAAAIAFAmMCY0ABAmTCZMABAmdCZ4AAwngCeAABQn8Cf0ABAoBCgEABAoiCiIAAgpGCkYAAgphCmIABApoCmgABApxCnEAAwqFCoYABAqMCowABAqVCpUAAwqzCrMAAgrcCtwAAgs0CzQABAs2CzYABAs5CzkABAtpC2kAAQAFAAAAAQAIAAIGlgAUAAYAAADgAAAAAAAAAAAAAgAOCYwJjQAECZ0JngADCeAJ4AAFCfwJ/QAECmEKYgAECnEKcQADCoUKhgAECpUKlQADCzQLNAAECzYLNgAECzkLOQAEC1ELUQACC1MLUwACC2kLaQABAAUAAAABAAgAAgYiABQABgAAAGwAAAAAAAAAAAACAA4JjAmNAAQJnQmeAAMJ4AngAAUJ/An9AAQKYQpiAAQKcQpxAAMKhQqGAAQKlQqVAAMKwArAAAIK6QrpAAILNAs0AAQLNgs2AAQLOQs5AAQLaQtpAAEABAAKABYAIgAwAAMAAQACAAMAAABKAAMAAQACAAQAAABLAAQAAQACAAUAAwAAAEoABAABAAIABQAEAAAASwAFAAAAAQAIAAIFcAASAAUAAADKAAAAAAAAAAIAEAmdCZ4AAwngCeAABAoaChoAAgoeCh4AAgo+Cj4AAgpCCkIAAgpxCnEAAwqVCpUAAwquCq4AAgq5CrkAAgrCCsMAAgrFCsUAAgrXCtcAAgriCuIAAgrrCuwAAgtpC2kAAQAFAAAAAQAIAAIE8gASAAUAAABMAAAAAAAAAAIACQmMCYwAAwmTCZMAAwmdCZ4AAwngCeAABAoYChgAAgo8CjwAAgpxCnEAAwqVCpUAAwtpC2kAAQACAAYAEgADAAEAAgADAAAASwAEAAEAAgAEAAMAAABLAAUAAAABAAgAAgR+AB4ACwAAA3YAAAAAAAAAAAAAAAAAAAAAAAAAAgCOCYIJggAECYMJgwAJCYQJhAAECYUJiAAFCYkJiwAICYwJjQADCY4JjgAFCY8JjwADCZAJkAAHCZEJkQAECZIJkgAGCZMJkwADCZQJlAAFCZUJmQAECZoJmgAHCZsJmwAFCZwJnAAECZ0JngACCZ8JoQAGCaIJogAECaMJowAGCaQJpAAECaUJpQAGCaYJpgAECb4JvgAECb8JvwAJCcAJwAAECcEJwQAICcIJwgAFCcMJwwADCcQJxQAECdkJ2QAECdoJ2gAHCdwJ3AAFCd0J3QAECfYJ+QAFCfoJ+wAICfwJ/QADCf4J/gAHCf8J/wAECgAKAAAGCgEKAQADCgIKAgAFCgMKBAAECgUKBQAHCgYKBgAFCgcKBwAGCggKCAAECgkKCQAGCgoKCgAECgsKCwAGCgwKDAAECg0KDQAHCg4KDgAGClcKVwAEClgKWAAJClkKWQAECloKXQAFCl4KXgAICl8KXwAJCmAKYAAICmEKYgADCmMKYwAFCmQKZAADCmUKZQAHCmYKZgAECmcKZwAGCmgKaAADCmkKaQAFCmoKbQAECm4KbgAHCm8KbwAFCnAKcAAECnEKcQACCnIKcgAHCnMKcwAGCnQKdAAECnUKdQAHCnYKdgAECncKdwAHCngKeAAECnkKeQAICnoKegAHCnsKewAECnwKfAAJCn0KfQAECn4KgQAFCoIKggAICoMKgwAJCoQKhAAICoUKhgADCocKhwAFCogKiAADCokKiQAHCooKigAECosKiwAGCowKjAADCo0KjQAFCo4KkQAECpIKkgAHCpMKkwAFCpQKlAAECpUKlQACCpYKlgAHCpcKlwAGCpgKmAAECpkKmQAHCpoKmgAECpsKmwAHCpwKnAAECzELMQAGCzQLNAADCzYLNgADCzkLOQADCzsLPAAFCz8LPwADC0ELQQAGC0MLQwAGC0QLRAADC0ULRQAEC0YLRgAIC0cLRwAEC0gLSAAIC0oLSgADC0sLSwAHC0wLTAAGC04LTgAEC08LTwAKC1ALUAAHC1ILUgAHC1ULVQAHC1YLVwAEC1gLWQAFC1oLWgAHC1sLWwAFC1wLXAAJC10LXQAIC14LXwAHC2ALYwAGC2ULZQAGC2cLZwAGC2kLaQABAAkAFAAeACgAMgA8AEYAUABaAGQAAgABAAIAAABBAAIAAQADAAAAQgACAAEABAAAAEMAAgABAAUAAABEAAIAAQAGAAAARQACAAEABwAAAEYAAgABAAgAAABHAAIAAQAJAAAASAACAAEACgAAAEoAAQAAAAEACAABAJIAAQABAAAAAQAIAAEAhAACAAEAAAABAAgAAQB2AAMAAQAAAAEACAABAGgABAABAAAAAQAIAAEAWgAFAAEAAAABAAgAAQBMAAYAAQAAAAEACAABAD4ABwABAAAAAQAIAAEAMAAIAAEAAAABAAgAAQAiAAkAAQAAAAEACAABABQACgABAAAAAQAIAAEABgALAAEAAQtpAAUAAAABAAgAAgEcABQABgAAAJwAAAAAAAAAAAACABYJbwlvAAMJggmmAAIJvgnFAAIJ9goOAAIKDwoPAAQKVwqcAAILHAscAAULMQsxAAILNAs0AAILNgs2AAILOAs5AAILOws8AAILPws/AAILQQtBAAILQwtIAAILSgtMAAILTgtQAAILUgtSAAILVQtjAAILZQtlAAILZwtnAAILaQt0AAEAAwAIABgAKA
ADAAIAAgADAAAATgACAE0AAwACAAIABAAAAE8AAgBNAAMAAgACAAUAAABQAAIATQABAAAAAQAIAAIADAADC5kLmQuZAAEAAwlvCg8LHAABAAAAAQAIAAEAIgAMAAEAAAABAAgAAQAUABgAAQAAAAEACAABAAYAJAACAAELaQt0AAAABgAAAAEACAACAYgAEAFeD9AAAgAAAWYAAgA3CYIJggACCYQJhAACCYwJjQACCY8JjwACCZEJkQACCZMJkwACCZUJmQACCZwJngACCaIJogACCaQJpAACCaYJpgACCb4JvgACCcAJwAACCcMJxQACCdkJ2QACCd0J3QACCfwJ/QACCf8J/wACCgEKAQACCgMKBAACCggKCAACCgoKCgACCgwKDAACClcKVwACClkKWQACCmEKYgACCmQKZAACCmYKZgACCmgKaAACCmoKbQACCnAKcQACCnQKdAACCnYKdgACCngKeAACCnsKewACCn0KfQACCoUKhgACCogKiAACCooKigACCowKjAACCo4KkQACCpQKlQACCpgKmAACCpoKmgACCpwKnAACCzQLNAACCzYLNgACCzkLOQACCz8LPwACC0QLRQACC0cLRwACC0oLSgACC04LTgACC1YLVwACC2oLbQABAAEJbgABAAEAAQAEAAIAAgABAAEAAAABAAAAUgABAAAAAQAIAAEABgJxAAEAAQluAAUAAAABAAgAAgAYAHQACAAAARoBKgE6AVQAAAAAAAAAAQAsCdkJ2gncCd0KWwpdCmEKYgpjCmQKcwp/CoEKhQqGCocKiAqXCzQLNgs5CzsLPAs/C0MLRAtFC0YLRwtIC0kLSgtQC1ILVwtZC1oLWwtcC10LXgtfC2ILYwACABsJrAmuAAUJrwmvAAYJuAm4AAUJyAnJAAYJ2QnaAAQJ3AndAAQKEAoQAAcKWwpbAAMKXQpdAAMKYQpkAAMKcwpzAAMKfwp/AAMKgQqBAAMKhQqIAAMKlwqXAAMLNAs0AAELNgs2AAELOQs5AAELOws8AAELPws/AAELQwtKAAILUAtQAAILUgtSAAILVwtXAAMLWQtZAAMLWgtfAAILYgtjAAMAAgAGADAAAgABAAUAAABXAAIABgAgAAIAAQAFAAEAVgACAAYAEAACAAEABQAAAFUAAgABAAYAAABXAAEABAADAAEABwAFAAEAVAACAAAAAQAIAAEACAABAA4AAQABChAAAQuZAAIAAAABAAgAAQAqABIAUgBYAF4AZABqAHAAdgB8AIQAjACUAJwApACsALQAugDAAMYAAQASClsKXQphCmIKYwpkCnMKfwqBCoUKhgqHCogKlwtXC1kLYgtjAAIJhguZAAIJiAuZAAIJjAuZAAIJjQuZAAIJjguZAAIJjwuZAAIJoAuZAAMJhgmnC5kAAwmICacLmQADCYwJpwuZAAMJjQmnC5kAAwmOCacLmQADCY8JpwuZAAMJoAmnC5kAAgtWC5kAAgtYC5kAAgtgC5kAAwtgCacLmQABAAAAAQAIAAIADgAEC9YL1wvYC9wAAQAECawJrQmuCbgAAgAAAAEACAABAFYAKACqALAAtgC8AMIAyADOANQA2gDgAOYA7ADyAPgA/gEEAQoBEAEWARwBIgEoAS4BNAE6AUABRgFOAVQBWgFgAWgBcAF2AXwBggGIAY4BlAGaAAEAKApbCl0KYQpiCmMKZApzCn8KgQqFCoYKhwqICpcLNAs2CzkLOws8Cz8LQwtEC0ULRgtHC0gLSQtKC1ALUgtXC1kLWgtbC1wLXQteC18LYgtjAAILtgoQAAILtwoQAAILuAoQAAILuQoQAAILugoQAAILuwoQAAILvAoQAAILvgoQAAILvwoQAAILwAoQAAILwQoQAAILwgoQAAILwwoQAAILxAoQAAIKGwmMAAIKGwmNAAIKHAmNAAIKHQmPAAIKHQmOAAIKHgmPAAIKIgmFAAIKIgmEAAIKIgmZAAIKIgmaAAIKIgmiAAIKIgmUAAMKIgojCZwAAgoiCZMAAgvGCYcAAgvGCaIAAwowC7gKEAADCjALuQoQAAIKMgmQAAIKMgmVAAIKMgmbAAIKMgmcAAIKMgmfAAIKMgmiAAILvQoQAAILxQoQAAUAAAABAAgAAgASAKIABQAAAboByAHaAAAAAQBGCYIJhgmICYwJjQmOCY8JlgmYCZ0JngmhCb4JwAnBCcIJwwnECcUJ9gn3CfgJ+Qn6CfsJ/An9Cf4J/woACgEKAgoDCgQKBQoGCgcKCAoJCgoKVwpoCmwKewp9Cn4KgAqCCoQKiQqKCosKjAqNCo4KjwqQCpEKkgqTCpQKlgqYCpkKmgsxC1YLWAthC2cAAgAuCYIJggADCYYJhgADCYgJiAADCYwJjwADCZYJlgACCZgJmAADCZ0JngADCaEJoQABCb4JvgACCcAJwAACCcEJwQABCcIJxAACCcUJxQABCcgJyQAECfYJ9gABCfcJ9wACCfgJ+AABCfkJ+QACCfoJ+wABCfwJ/QACCf4KAAABCgEKAQACCgIKCgABClcKVwADCmgKaAADCmwKbAADCnsKewACCn0KfQACCn4KfgABCoAKgAABCoIKggABCoQKhAABCokKiwABCowKjAACCo0KjQABCo4KjgACCo8KjwABCpAKkAACCpEKlAABCpYKlgABCpgKmgABCzELMQADC1YLVgADC1gLWAADC2ELYQABC2cLZwABAAEABAACAAEABAAAAFsAAQAEAAIAAgAEAAAAWwABAFwAAQAEAAIAAQAEAAEAXQAFAAAAAQAIAAIAGgCKAAkAAAGKAZgBygHOAeoAAAAAAAAAAQA2CaEJvgnACcEJwgnDCcQJxQn2CfcJ+An5CfsJ/An9Cf4J/woACgEKAgoDCgQKBwoICgkKCgpYCmgKewp8Cn0KfgqACoIKhAqJCooKiwqMCo0KjgqPCpAKkQqSCpMKlAqWCpgKmQqaCpwLYQtnAAIAKgmhCaEAAwmsCawABgmtCa0ABwmuCa8ACAm+Cb4AAQnACcAAAQnBCcEAAwnCCcMAAgnECcUAAQn2CfYAAQn3CfcAAgn4CfgAAQn5CfkAAgn7CfsAAQn8Cf0AAgn+Cf4AAQn/Cf8ABAoACgAAAQoBCgEABQoCCgIABAoDCgQAAQoHCgcABAoICggAAQoJCgkAAwoKCgoAAQpYClgAAQpoCmgAAQp7CnsABAp8CnwAAQp9Cn4ABAqACoAABAqCCoIAAwqECoQAAwqJCokAAwqKCpQABAqWCpYABAqYCpgABAqZCpkAAwqaCpoABAqcCpwAAQthC2EABAtnC2cAAQABAAQAAgABAAYAAQBaAAMACAAWACQAAgACAAYAAABbAAEAXAACAAIABwAAAFsAAQBcAAIAAgAIAAAAWwABAFwAAQAMAAMACAASACAAAgABAAYAAABbAAIAAQAHAAAAWwABAAQAAgABAAgAAABbAAEAAAABAAgAAQAGAfwAAQABCawAAgAAAAEACAABAHwAOwDUANoA4ADmAOwA8gD4AP4BBAEKARABFgEcA
SIBKAEuATQBOgFAAUYBTAFSAVgBXgFkAWoBcAF2AXwBggGIAY4BlAGaAaABpgGsAbIBuAG+AcQBygHQAdYB3AHiAegB7gH0AfoCAAIGAgwCEgIYAh4CJAIqAjAAAgAOCZYJlgAACaEJoQABCb4JvgACCcAJxQADCfYKCgAJCnsKewAeCn0KfgAfCoAKgAAhCoIKhAAiCokKlAAlCpYKnAAxC2ELYQA4C2MLYwA5C2cLZwA6AAIJlQmnAAIJoAmnAAIJggmnAAIJhAmnAAIJiQmnAAIJjgmnAAIJjwmnAAIJmAmnAAIJnAmnAAIJhQmnAAIJhgmnAAIJhwmnAAIJiAmnAAIJigmnAAIJiwmnAAIJjAmnAAIJjQmnAAIJkAmnAAIJkQmnAAIJkgmnAAIJkwmnAAIJlAmnAAIJlwmnAAIJmQmnAAIJmgmnAAIJmwmnAAIJnwmnAAIJogmnAAIJowmnAAIJpAmnAAIKVwmnAAIKWQmnAAIKWgmnAAIKXAmnAAIKXgmnAAIKXwmnAAIKYAmnAAIKZQmnAAIKZgmnAAIKZwmnAAIKaAmnAAIKaQmnAAIKagmnAAIKawmnAAIKbAmnAAIKbQmnAAIKbgmnAAIKbwmnAAIKcAmnAAIKcgmnAAIKcwmnAAIKdAmnAAIKdQmnAAIKdgmnAAIKdwmnAAIKeAmnAAILYAmnAAILYgmnAAILZQmnAAQAAAABAAgAAQA6AAEACAAGAA4AFAAaACAAJgAsC6cAAgmsC6oAAgmtC6wAAgmuC64AAgmvC7EAAgnIC7QAAgnJAAEAAQmnAAEAAAABAAgAAgAKAAILrwuzAAEAAgnICckABAAAAAEACAABAaQACgAaAFIAZAB2AJgA+gEUATYBcAGCAAYADgAWAB4AJgAsADIK/gADCacJrAr/AAMJpwmtCwAAAwmnCa4K+QACCawK+gACCa0K+wACCa4AAgAGAAwK9wACCawK+AACCa0AAgAGAAwK/AACCawK/QACCa0ABAAKABAAFgAcCu0AAgmsCu4AAgmtCu8AAgmuCvAAAgmvAAsAGAAeACQAKgAwADYAPABCAEoAUgBaC6YAAgmsC6kAAgmtC6sAAgmuC60AAgmvC7UAAgm4C7AAAgnIC7IAAgnJC8kAAwuZCawLzAADC5kJrQvPAAMLmQmuC9UAAwuZCbgAAwAIAA4AFAr+AAIJrAr/AAIJrQsAAAIJrgAEAAoAEAAWABwK8QACCawK8gACCa0K8wACCa4K9AACCa8ABwAQABYAHAAiACgALgA0C8cAAgmsC8oAAgmtC80AAgmuC9AAAgmvC9MAAgm4C9EAAgnIC9IAAgnJAAIABgAMCvUAAgmsCvYAAgmtAAQACgAQABYAHAvIAAIJrAvLAAIJrQvOAAIJrgvUAAIJuAABAAoJkwmdCZ4JpgmnCgEKDAoQCngLmQAFAAAAAQAIAAEAUAACAAoACgACAAYAFAACAAIJrAAAAGAAAQBiAAIAAgmtAAAAYQABAGIAAQAAAAEACAACABwAAgr3CvwAAQAAAAEACAACAAoAAgr4Cv0AAQACCnEKlQABAAAAAQAIAAIACgACChAKEAABAAIJrAmtAAYAAAABAAgAAgG0ABABHAEyAAIAAAE6AAIALAmCCYIAAwmGCYYAAQmICYgAAQmMCYwAAQmNCY0AAgmPCY8AAQmTCZMAAQmYCZgAAwmgCaEAAQm+Cb4AAwnDCcMAAQnECcQAAwn3CfcAAQn5CfkAAQn8CfwAAQn9Cf0AAgoBCgEAAQpXClcAAwpbClsAAQpdCl0AAQphCmEAAQpiCmIAAgpkCmQAAQpoCmgAAQpsCmwAAwpzCnMAAQp7CnsAAwp/Cn8AAQqBCoEAAQqFCoUAAQqGCoYAAgqICogAAQqMCowAAQqQCpAAAwqXCpcAAQsxCzEAAws0CzQAAQs2CzYAAgs5CzkAAgs/Cz8AAQtDC0gAAQtKC0oAAQtaC1sAAQteC2MAAQACAAMJqwmrAAELAQsDAAEL6AvoAAEAAQlrAAEAAQADAAgAFgAkAAEAAQABAAAAAQAAAGQAAQACAAEAAAABAAAAZQABAAMAAQAAAAEAAABmAAEAAAABAAgAAgBAAAULmgudC6ALowvpAAEAAAABAAgAAgAoAAULmwueC6ELpAvqAAEAAAABAAgAAgAQAAULnAufC6ILpQvrAAEABQmrCwELAgsDC+gABAAAAAEACAABAN4AEgAqADQAPgBIAFIAXABmAHAAegCEAI4AmACiAKwAtgDAAMoA1AABAAQKygACC9UAAQAECtEAAgvVAAEABArTAAIL1QABAAQK1QACC9UAAQAECtcAAgvVAAEABApGAAILtQABAAQKQQACCbgAAQAECkIAAgm4AAEABAo5AAIJuAABAAQKPwACCbgAAQAECkAAAgm4AAEABArDAAIJuAABAAQK7AACCbgAAQAECqEAAgvTAAEABAqoAAIL0wABAAQKqgACC9MAAQAECqwAAgvTAAEABAquAAIL0wABABIJhgmMCY0JjgmPCZMJwgnDCfcJ/An9CngKnAu2C7gLuQu6C7sAAA=="),this.addFont("NotoSans-Regular.ttf","NotoSans","normal")}])},Re=e=>{e&&e.API.events.push(["addFonts",function(){this.addFileToVFS("NotoSans-Bold.ttf","AAEAAAARAQAABAAQR0RFRkDtS4AABRdYAAAHFEdQT1Pgpko1AAUebAAAr85HU1VCegiGMQAFzjwAAIdgT1MvMnj9nGUAAAGYAAAAYGNtYXDrHLM9AAAyjAAABwRjdnQgJwgn0AAAQ/AAAAESZnBnbTYLFgwAADmQAAAHtGdhc3AAEQAjAAUXSAAAABBnbHlmXFRc9wAAdZwABJ0gaGVhZAOuy/cAAAEcAAAANmhoZWEOyQx1AAABVAAAACRobXR4r0Gm1wAAAfgAADCSbG9jYRyjnYwAAEUEAAAwmG1heHAO2QUiAAABeAAAACBuYW1lalGXEwAFErwAAARscG9zdP9pAGYABRcoAAAAIHByZXC1ziNUAABBRAAAAqsAAQAAAAEKPaHq+vBfDzz1AAsIAAAAAADPKrupAAAAAM8qu6v6sfzaC4gIjQABAAkAAgABAAAAAAABAAAIjf2oAAALsvqx9vYLiAABAAAAAAAAAAAAAAAAAAAMJAABAAAMJQFSAFQAawALAAIAEAAXAFwAAAHlA0sAAwABAAME5gK8AAUACAWaBTMAAAEfBZoFMwAAA9EAZgIACAICCwgCBAUEAgIE4ACC/0AAeP8AAAAhAAAAAE1PTk8AIAAA//0Ijf2oAAAIjQJYIAABn9/XAAAEXgW2AAAAIAAEBM0AwQAAAAAEFAAAAhQAAAJKAHUDxwCFBSsALQSTAFgHNQA/BgAAUgIhAIUCtgBSArYAPQRcAD8EkwBYAkgAPwKTAD0CSAB1A04ADgSTAEoEkwB5BJMATgSTAE4EkwAjBJMAZASTAEgEkwA3BJ
MASASTAEICSAB1AkgAPwSTAFgEkwBYBJMAWAPRAAYHLQBmBYUAAAVgALgFGQB3BewAuAR7ALgEZAC4BcsAdwYfALgDHQBCAqb/aAVQALgEhQC4B4sAuAaBALgGXgB3BQYAuAZeAHcFSAC4BGgAXgSiACkGDACuBTMAAAe8AAAFVgAABP4AAASiADECpgCPA04ADAKmADMEkwAvA0r//ATbAUwE1QBWBRAAoAQdAFwFEABcBLoAXAMZACkFEABcBUIAoAJxAJMCcf99BPYAoAJxAKAH2wCgBUIAoAT0AFwFEACgBRAAXAOiAKAD+gBcA3kALwVCAJoEjQAABtkAFASgAAoEjQAAA+cANwMnAB8EaAHHAycAUgSTAFgCFAAAAkoAdQSTAI8EkwBSBJMAcQSTAAYEaAHHA+MAagTbARcGqABkAxAALwTsAFIEkwBYApMAPQaoAGQEAP/6A20AXASTAFgDCAAvAwgAOwTbAUwFSACgBT0AcQJIAHUBpP/bAwgAXAMbADkE7ABSBwwALgcMAC4HDABaA9EAPQWFAAAFhQAABYUAAAWFAAAFhQAABYUAAAeeAAAFGQB3BHsAuAR7ALgEewCvBHsAuAMdACoDHQBCAx3/3AMdADkF7AAvBoEAuAZeAHcGXgB3Bl4AdwZeAHcGXgB3BJMAgQZeAHcGDACuBgwArgYMAK4GDACuBP4AAAUGALgFsACgBNUAVgTVAFYE1QBWBNUAVgTVAFYE1QBWB1YAVgQdAFwEugBcBLoAXAS6AFwEugBcAnH/mwJxAJECcf+GAnH/4wT0AFwFQgCgBPQAXAT0AFwE9ABcBPQAXAT0AFwEkwBYBPQAXAVCAJoFQgCaBUIAmgVCAJoEjQAABRAAoASNAAAFhQAABNUAVgWFAAAE1QBWBYUAAATVAFYFGQB3BB0AXAUZAHcEHQBcBRkAdwQdAFwFGQB3BB0AXAXsALgFEABcBewALwUxAFwEewC4BLoAXAR7ALgEugBcBHsAuAS6AFwEewC4BLoAXAR7AK8EugBcBcsAdwUQAFwFywB3BRAAXAXLAHcFEABcBcsAdwUQAFwGHwC4BUIAoAYfAAAFQgAEAx3/8QJx/5sDHQA/AnH/6QMdAAcCcf+vAx0AQgJxAF4DHQBCAnEAoAXDAEIEugCTAqb/aAJx/30FUAC4BPYAoAT2AKAEhQC4AnEAoASFALgCcQBjBIUAuAJxAKAEhQC4A20AoASFAAICcf/nBoEAuAVCAKAGgQC4BUIAoAaBALgFQgCgBjsABgaBALgFQgCgBl4AdwT0AFwGXgB3BPQAXAZeAHcE9ABcB8kAdwfTAFwFSAC4A6IAoAVIALgDogBjBUgAuAOiAFMEaABeA/oAXARoAF4D+gBcBGgAXgP6AFwEaABeA/oAXASiACkDeQAvBKIAKQN5AC8EogApA3kALwYMAK4FQgCaBgwArgVCAJoGDACuBUIAmgYMAK4FQgCaBgwArgVCAJoGDACuBUIAmge8AAAG2QAUBP4AAASNAAAE/gAABKIAMQPnADcEogAxA+cANwSiADED5wA3AxAAoASTAMUFhQAABNUAVgeeAAAHVgBWBl4AdwT0AFwEaABeA/oAXATbALoE2wC6BNsBGwTbAOMCcQCTBJ4BVAGmAAoE2wDPBLQAnASeAdcEngC6BYX/yAJIAHUFCv+dBq7/nQQZ/50GsP/GBh3/iAak/8YDQv/JBYUAAAVgALgEfQC4BUQAOQR7ALgEogAxBh8AuAZeAHcDHQBCBVAAuAUzAAAHiwC4BoEAuASRAFIGXgB3BfYAuAUGALgEvgBOBKIAKQT+AAAG4QBcBVYAAAcCAG0GSgA3Ax0AOQT+AAAFLQBcBHEATgVCAKADQgCgBSkAjwUtAFwFSACgBIsAAgT0AFwEcQBOA/wAXAVCAKAE8gBcA0IAoAT2AKAE7AAIBUgAoATDAAYD/ABcBPQARgXpABkE8gB5A/wAXAU5AFwETgApBSkAjwZWAFwEvP/PBrIAjwbnAG0DQgAMBSkAjwT0AEYFKQCPBucAbQR7ALgGcQApBH0AuAVqAHcEaABeAx0AQgMdADkCpv9oB/4AEAgEALgGcQApBWAAuAVOABQF9gC4BYUAAAUbALgFYAC4BH0AuAYdAAoEewC4B4sAAAUvAF4GlgC4BpYAuAVgALgF9gAQB4sAuAYfALgGXgB3BfYAuAUGALgFGQB3BKIAKQVOABQG4QBcBVYAAAY/ALgF0wBtCKAAuAjpALgF0QAABz8AuAUbALgFTgBICI8AuAVS//YE1QBWBPoAXAUdAKAD0wCgBVAAHQS6AFwG/AAABHEATgXDAKAFwwCgBPQAoAUpAAAGwQCgBUwAoAT0AFwFNwCgBRAAoAQdAFwEbQAvBI0AAAaDAFwEoAAKBYEAoAU/AHsHwQCgB+EAoAWuAAAGzQCgBOkAoAQZAEoHBACgBL4AAAS6AFwFQgAEA9MAoAQxAFwD+gBcAnEAkwJx/+UCcf99BxsAAAcbAKAFQgAEBPQAoASNAAAFYACgBKYAuAQZAKAHvAAABtkAFAe8AAAG2QAUB7wAAAbZABQE/gAABI0AAAQAAFIIAABSCAAAUgNK//wBvAAZAbwAGQJIAD8BvAAZA48AGQOPABkEGwA/BCEAewQhAHEDAgBiBtcAdQo/AD8CIQCFA8cAhQLyAFIC8gBSBI8AdQEK/ncDYgBmBJMAIwSTAFIHIwC4BJMAQgZcAD8EKQApCDkAhwYvACMGSgA3BPQAZgcMADoHDAA7BwwAWgcMAEMEpgA7BUQAOQXuAKYFDAApBJMAWARkACUFqABxA0wAAASTAFgEkwBYBJMAVgSTAFgEqgBYBYkAKQWJACkEngBoAnH/fQQAAV4EAAFeBAABTgMIAAwDCABUAwgAOwMIAC0EAAAACAAAAAQAAAAIAAAAAqoAAAIAAAABVgAABHkAAAJIAAABmgAAAM0AAAAAAAAAAAAACAAAVAgAAFQCcf99AbwAGQXbACkFDAAAB/4AMweLALgH2wCgBYUAAATVAFYIHQABAqoAWAIAAHkIoAApCKAAKQaaAHcFbwBcBxQArgYUAJoAAPwWAAD80AAA++AAAPzZAAD82QR7ALgGlgC4BLoAXAXDAKAItAB3BxQABgViAAAFTAAAB5oAuAZmAKAF1wAABR8AAAgKALgHNwCgBm8AKQT8ABQIlgC4BwoAoAUOACkEcQAfBwIAbQayAI8GXgB3BPQAXAW8AAAE1wAABbwAAATXAAAKjQB3CSkAXAawAHcFbwBcCLQAdwcfAFwItAB3BxQABgVqAHcEMQBcBN8AaAR1ALQEngD0BJ4BzQSeAcsH6QApB6YAKQdUALgGagCgBO4ALwTpAAQFBgC4BRAAoAR5AC8D7gAEBd8AuATRAKAIOwAAB4kAAAUvAF4EcQBOBgwAuAVSAKAFUAC4BMsAoAUlAAQE9gAEBd0AAAWPAAAGugC4BfIAoAasALgGEACgCQAAuAcdAKAGNwB3BT8AXAUZAHcEHQBcBKIAKQRmAC8E/gAABJgAA
AT+AAAEmAAABfIAAAUfAAoHcQApBlQALwZvAG0FzwB7BdMAbQU/AHsF0wC4BVQAoAeWAAAFuAAAB5YAAAW4AAADHQBCB4sAAAb8AAAGFAC4BUoAoAa0ABAF0QAABh8AuAVMAKAG3QC4BfQAoAXTAG0FPwB7CEoAuAdoAKADHQBCBYUAAATVAFYFhQAABNUAVgeeAAAHVgBWBHsAdgS6AFwGiQCkBLoAWAaJAKQEugBYB4sAAAb8AAAFLwBeBHEATgS6ADkEpgA5BpYAuAXDAKAGlgC4BcMAoAZeAHcE9ABcBl4AdwT0AFwGXgB3BPQAXAVOAEgEGQBKBU4AFASNAAAFTgAUBI0AAAVOABQEjQAABdMAbQU/AHsEfQC4A9MAoAc/ALgGzQCgBHkALwPuAAQF2wAABSkACgVWAAAEoAAKBRsAXAUQAFwHaABcB2IAXAdOABkG9gA5BZwAGQVKAE4IRAAQB3sAAAhYALgHngCgBmYAdwVOAFwGEAApBd8ALwUvAFgEcQBOBosAEAXLAAAFhQAABNUAVgWFAAAE1QBWBYUAAATVAFYFhQAABNX/0wWFAAAE1QBWBYUAAATVAFYFhQAABNUAVgWFAAAE1QBWBYUAAATVAFYFhQAABNUAVgWFAAAE1QBWBYUAAATVAFYEewC4BLoAXAR7ALgEugBcBHsAuAS6AFwEewC4BLoAXAR7/80Euv/fBHsAuAS6AFwEewC4BLoAXAR7AKsEugBcAx0AQgJxAHUDHQBCAnEAkQZeAHcE9ABcBl4AdwT0AFwGXgB3BPQAXAZeAHcE9P/fBl4AdwT0AFwGXgB3BPQAXAZeAHcE9ABcBpoAdwVvAFwGmgB3BW8AXAaaAHcFbwBcBpoAdwVvAFwGmgB3BW8AXAYMAK4FQgCaBgwArgVCAJoHFACuBhQAmgcUAK4GFACaBxQArgYUAJoHFACuBhQAmgcUAK4GFACaBP4AAASNAAAE/gAABI0AAAT+AAAEjQAABTEAXAAA+38AAPwtAAD7DAAA/C0AAPwxAAD8MQAA/DEAAPwxAAD8MQGmAAoCVgAQBKIAKQN5AC8FEAAEBloACgUbALgFEACgBUoArgUSAJoFGQBIBRkAdwQdAFwF7AAvBuUACgUbAFwFEABcBPQAXAR7AHkGiQCkBS8AWARk/9sFywB3BTMAAAflAKADOQCuAx0ANwVQALgE9gCgAqQAFATsAAgISACuBoH/2wVCAKAGXgB3CQwAdwdvAFwGAAAKBRAAoAVIALgEaABWA/oATgS+AE4C8P+HA3kALwTyAAoDeQAvBKIAKQZKADcFMwAABP4AAATsABQEogAxA+cANwS6ADkEugBQBKYAUASmAGIEjwA3BKAAOQRxAE4D+gBKBRAAoAQhAaIEIQCbBCEAZgJKAHUKeQC4CdMAuAj4AFwHKwC4BvYAuAThAKAJJwC4CPIAuAeyAKAFhQAABNUAVgMd/9wCcf+GBl4AdwT0AFwGDACuBUIAmgYMAK4FQgCaBgwArgVCAJoGDACuBUIAmgYMAK4FQgCaBLoAWAWFAAAE1QBWBYUAAATVAFYHngAAB1YAVgXLAHcFEABcBcsAdwUQAFwFUAC4BPYAoAZeAHcE9ABcBl4AdwT0AFwEugA5BKYAOQp5ALgJ0wC4CPgAXAXLAHcFEABcCGIAuAVoALgGgQC4BUIAoAWFAAAE1QBWBYUAAATVAFYEewBQBLoAXAR7ALgEugBcAx3/cwJx/wwDHQAEAnH/rgZeAHcE9ABcBl4AdwT0AFwFSABxA6L//gVIALgDogCDBgwArgVCAIcGDACuBUIAmgUvAF4EgQAUBh8AuAVCAKAGFwC4BRAAXAYhAG0FCABcBKIAMQPnADcFhQAABNUAVgR7ALgEugBcBl4AdwT0AFwGXgB3BPQAXAZeAHcE9ABcBl4AdwT0AFwE/gAABI0AAAN7AE4GKQCgA5wALwewAFwHsABcBYUAAAUZAHcEHQBcBIUALwSiACkD+gBcA+cANwPVAAYD6QAGBWAAFAYMAAAFMwAABHsAuAS6AFwCpv9oAnH/fQYzAHcFEABcBUgAFAOiAAAE/gAABI0AAATVAJoFEABcBRAAoAUQAKAEHQA/BGYAXAUQAFwFEABcBLoAWAS6AFgGkQBcBHEATgRxAE4FkQBOBQgAXAJx/30FEABcBRAAXATLAFwEjQAABI0AAAVCAJoFQgCgBUIAoAJxAAADQgCgAuMAKQN7AAADPf/sAnEAoAYUAKAH2wCaB9sAmgfbAKAFQv/DBUIAoAXDAKAE9ABcBvoAXAakAF4GgwBcA6IAKwOiACsDogArA6IAoAOiAKADLwCaAy8AKwTXAKwE1wCsA/oAXAJx/8MCcf/DAnH/wwJx/0YDeQBCA3kALwVCAAAFLwAzBVwAmgSNAAAG2QAUBI0AAASNAAAD5wA3BJoANwSmADkEpgAAA9UABgPVADED1QAGBB0AXAZeAHcFHQCgBQgATgTLAFwFTACgAnH/RgT2AAAD0wCgBRAAXAPVAAYD1QAxCAwAXAiJAFwIvgBcBocALwUIAC8HXAAvB7IAKQXDAKAFbQCgBEoAAATZAKAFQv/XBUL/1wRgAJ4EYACeAif/ugMIAJ4DCgAxAwIALQQGAJ4FjQAnA7wAFAG8ABkDjwAZAbwAGQG6ABkAAP+BAAD/gQKJABACiQAhBJMAWASTAFgEkwAtBJMALQAA/5oAAP+GAAD+OwAA/5oAAP6vAAD+4AAA/uAAAP9KAAD/SgAA/4EAAP+BAAD/KQAA/ykAAP8pAAD/KQAA/rwAAP8vA7oAFAItAJ4DZABqA+UAKwNEAEQDWACgA1gAoANYAKADWACgA1gAoANYAKADWACgAAD+TgAA/lYDjwAZAAD+qgAA/qoAAP8AAAD/AAAA/uAAAP5DAAD+QgAA/mMAAP9TAAD/VgAA/1YAAP9WAAD/VgAA/jcAAP43AAD+LwAA/k4AAP6vAAD+VgAA/ncAAP9bAAD+qwAA/NkAAP8HAAD+QwAA/k4AAP+aAAD+7AAA/pYAAP53AAD/PQAA/z0AAP89AAD/PQAA/uAAAP7gAAD/TAAA/0wAAP6TAAD/RgAA/4MAAP8pAAD/KQAA/ykAAP6vAAD+OQAAAAAAAP6rAAD/BgAA/1UAAP8eAAD/NgAA/5oAAP6TAAD+fQAA/k4AAP5OAAD+dwAA/ncAAP5jAAD+rwAA/iEAAP5ZAAD+YQAA/lYAAP0KAAD+mgAA/lYAAP+FAAD+kwAA/zUAAP59AAD/LwAA/30AAP5WAAD+OwAA/4YAAP5jAAD/VQAA/m0AAP+UAAD+QgAA/lkAAP7sAAD/QgAA/ocAAP6HAAD+ngAA/poAAP9GAAD9JQAA/1QAAP8DAAD+lgAA/z0AAP9UAAD/VAAA/ocAAAAAAAAAyAAA/ycAAP53AAD/PQAA/z8AAP8/AAD/QgAA/0IAAP8/AAD/PwAA/z8AAP68AAD+rgAA/5MAAP6sAAD+rgAA/tkAAP6qAAD+rgAA/dEAAP8QAAD+rgAA/oEAAP6BAgYAKQIG
ACkCBgCKBB0APwQdAFwEHQA/AlIAPwT6AFoGK/+IBQwAAAaDAFwFKf/sBl4AdwT0AFwFGQB3A/wAXARtALgEJwCgBAL/9gRoAFwEwf/sBAj/ZgiYAK4H2wCaBaQAXAUQAFwFdwC4BJYAoAR/AFYEJwAxBUQAOQRQAB8GOQB3BPQAXAVCAB8EbQAKBSn/7AT0AF4EHQBcAnH/fQZeAHcEMQBcBBkASgUGALgFEACgBRkAdweLALgGkwCgBPIAAAUZAEgFGQB3BRkASAAA/nUGXgB3BRAAXAe8AAAG2QAUBH0AFAYdABAHVgBYBFoAEgQnAHkE3wCwBN8ARgO6ALAERABWAnEAkQJK/6oEVACwA7wAKwYnALAFVgCwBTMAeQQpAEgFPwBcBT8AXAU/ACUH0wBYBKoAdQT0AFwE9ABcBDEAsARvAB0EbwAdA7gAKwU3AJoFBABOBqgARAUEAE4EOwAUBmIAKQQCAFYD7ABEA7oAZAQfAB8DwwCwBDkAFATsALAEMQCwBawAbQTfAB0ELQASBbQADgQfAKQEHwASBIcApAN1AKQDdwBtBFwAcQSyAKQCiwBUAif/rgQIAKQDcwCkBbwApATlAKQFBgCkBMkAcQRiAG0D5QCkBBcApAN1ACcEngCaBdkAJQO+AFoDvgCNA/QAaAWaAGID8gCRA/QAaAO4AGgDyQBiA2gAWANvAEgD9ABmAgQAiQPVAJEF9ACRBA4AkQPjAGgDOQBGA+MAaAPjAGgD8gCRAqAAMQQKAIkD+gBgBfIAiwN3ABIC7gAOBB0AkQODABkD4QBmBOkAaAO+AAACAACHAtEAkQQKAIkDdwASBB0AkQODABkDxwCLBOkAaAO+AAAHwQCaBRD/gwUQAFwDGf+0B9v/vAVC/7wFEP+DA6L/gwMv/30D+v/4A3n/pAPnAAQFEACgBDEAlgSHABAIugAvAnEAAANCAAAFEAAABUQAAAUvAAAFEACgBRAAXAMZACkGsgBcBPYAoAJxAHEH2wCgBUIAoAUQAKADogBxA/oAXAQS/8MEjQAABKAACgPnADcE1QBWBRAAXAUQAFwEugBcBHEATgRxAE4F7gBYAnEAkwQdAD8Ccf/DBUIAmgRxAE4DugCNAzUAXgNaAF4DngBSA0gASAJxAEIBsP+oA9cAXgPlAI0BrgAAAnUAjQJMAEYCTABGAfb/pAH0AI0B9P/0AvAAkQWuAJEFrACLA+X/+APlAJEEPQCRA6QAXgSiAF4DAgBeAfb/+AKBADED5QAjA54AKQPbAIsD9ACLAwj/9AMUAFIDFABSA38AUgNMAC0DogBeAAD+XgAA/jUAAP+FAAD+dQAA/tEAAP7BAAD+0QAA/sEAAP45AAD+OQAA/0oAAP9UAAD+hwVgALgFEACgBWAAuAUQAKAFYAC4BRAAoAUZAHcEHQBcBewAuAUQAFwF7AC4BRAAXAXsALgFEABcBewAuAUQAFwF7AC4BRAAXAR7ALgEugBcBHsAuAS6AFwEewCtBLoAXAR7ALgEugBcBHsAuAS6AFwEZAC4AxkAKQXLAHcFEABcBh8AuAVCAKAGHwC4BUIAoAYfALgFQgCgBh8AjQVCAHEGHwC4BUIAoAMd//UCcf+fAx0APwJx/+sFUAC4BPYAoAVQALgE9gCgBVAAuAT2AKAEhQC4AnEAkQSFAAgCcf/nBIUAuAJx/+cEhQC4AnH/hgeLALgH2wCgB4sAuAfbAKAGgQC4BUIAoAaBALgFQgCgBoEAuAVCAKAGgQC4BUIAoAZeAHcE9ABcBl4AdwT0AFwGXgB3BPQAXAZeAHcE9ABcBQYAuAUQAKAFBgC4BRAAoAVIALgDogCgBUgAuAOiAJEFSAC4A6IAkQVIALgDov/rBGgAXgP6AFwEaABeA/oAXARoAF4D+gBcBGgAXgP6AFwEaABeA/oAXASiACkDeQAvBKIAKQN5AC8EogApA3kALwSiACkDeQAvBgwArgVCAJoGDACuBUIAmgYMAK4FQgCaBgwArgVCAJoGDACuBUIAmgUzAAAEjQAABTMAAASNAAAHvAAABtkAFAe8AAAG2QAUBVYAAASgAAoFVgAABKAACgT+AAAEjQAABKIAMQPnADcEogAxA+cANwSiADED5wA3BUIAoAN5AC8G2QAUBI0AAATVAFYDEACgBhsArgUtAFwFLQBcBS0AXAUtAFwFLQBcBS0AXAUtAFwFLQBcBYUAAAWFAAAGuAABBrgAAQa4AAEGuAABBnv/4wZ7/+MEcQBOBHEATgRxAE4EcQBOBHEATgRxAE4FTgABBU4AAQakAAEGpAABBnsAAQZ7/+wFQgCgBUIAoAVCAKAFQgCgBUIAoAVCAKAFQgCgBUIAoAbyAAEG8gABCEgAAQhIAAEIHwABCB//7AgX/84IF//OA0IAoANCAJ4DQv/9A0L/8ANCACwDQgADA0L/1gNC/84ESgABBD0AAQV7AAEFewABBY8AAQWkAAEFav/OBWr/zgT0AFwE9ABcBPQAXAT0AFwE9ABcBPQAXAbuAAEHAAABCGQAAQhkAAEICAABCB0AAQUpAI8FKQCPBSkAjwUpAI8FKQCPBSkAjwUpAI8FKQCPBkwAAQeNAAEHogABB6L/4wbnAG0G5wBtBucAbQbnAG0G5wBtBucAbQbnAG0G5wBtBvoAAQcOAAEIcQABCHEAAQgUAAEIKQABB7z/zge8/84FLQBcBS0AXARxAE4EcQBOBUIAoAVCAKADQgA7A0IAoAT0AFwE9ABcBSkAjwUpAI8G5wBtBucAbQUtAFwFLQBcBS0AXAUtAFwFLQBcBS0AXAUtAFwFLQBcCMcAAAjHAAAJ+gABCfoAAQn6AAEJ+gABCbz/4wm8/+MFQgCgBUIAoAVCAKAFQgCgBUIAoAVCAKAFQgCgBUIAoAozAAEKMwABC4kAAQuJAAELYAABC2D/7AtY/84LWP/OBucAbQbnAG0G5wBtBucAbQbnAG0G5wBtBucAbQbnAG0KOwABClAAAQuyAAELsgABC1YAAQtqAAEK/v/OCv7/zgUtAFwFLQBcBS0AXAUtAFwFLQBcBS0AXAUtAFwFhQAABYUAAAWF/+0Fhf/vCMcAAAI9AJEEngGuAj0AkQTbAM8EpADlBUIAoAVCAKAFQgCgBUIAoAVCAKAFUv/OBUb/zgb2/84G6f/OCWAAuASeARAEngEQBL4A8gNC/68DQv/pA0L/nANC/5wDQv+rA0L/0gMdAAcDHQA/BCn/zgQ7/84EngEQBJ4BEAS+APIFKQCPBSkAjwUpAI8FKQCPBPIAeQTyAHkFKQCPBSkAjwT+AAAE/gAABjv/zgY7/84F2QABBJ4AugSeALoEngF1BucAbQbnAG0G5wBtBucAbQbnAG0HEv/OBrL/zAcf/84Gwf/OCYsANwSeAcsCPQCeAAD/1QAA/yEAAP/XAAD+TASTAFIEaAC+A48AGQAA/9cAAP5OAAD/EgAA/xIAAP8SAZoAAAVtAIUEAP/6AkgAdQAA/xIAAP8SAAD/EAAA/xAAAP8QAAD/EgMIACkDCAAzAwgAKwO+AFo
DuABoA+MAaAOgACcDyQBiBJMAKwSTAEoEkwBKB9sAoASTABQHVACaBeUAFASTABAEkwApCBQAKQThAAAEkwAUBd8AdwUzAAAEkwAUBRkAdwSTAIUAAP4RBqgAZAY/AAoD6QA9BwwAOgcMAB4EHQA/CAABogQAARAIAAGiBAABEAgAAaIEAAEQBAABEAEK/ncCSAB1B9UBmAXBARcEqgBkBNUAngSRAFgE1QIjBNUBBAWq//YFAAHXBaoCjQWq//YFqgKNBar/9gWqAo0Fqv/2Bar/9gWq//YFqv/2Bar/9gWqAdkFqgKNBaoB2QWqAdkFqv/2Bar/9gWq//YFqgKNBaoB2QWqAdkFqv/2Bar/9gWq//YFqgKNBaoB2QWqAdkFqv/2Bar/9gWq//YFqv/2Bar/9gWq//YFqv/2Bar/9gWq//YFqv/2Bar/9gWq//YFqgAABaoAAAWqAAAFqgAABaoC1QWqAGYFqgAABdUAAATVAHsE1QAGAtUAbQLVAG0IAAAAB+wBngfsAZEH7AGeB+wBkQTVAKgEwQBiBNUAsgTVACkE1QApAtUAcwgrAbAIagHRB1YBRgYAAdkGAAFSBD8AOwU/ADsEwQBmBBQAQgQAAMUGAAEQBGgAZgSFAAACcQAABIX/xQUGABQFSAC4BNUAVgN5AC8GugC4BVoAoAWNALgFAgCgBKIAMQPnADcGMwB3BLYAAAflAAAHAgAUBI0AFASqALgDyQCgBlYAXAK0ACEAAP9/AAD/fwAA/q4AAP7wA+MAjwPjAI8CKQCBAikAgQIpAIEAAP7wAAD+8AAAAJ4CSABiA80AZgJKAHkCKQCBAAD+tAAA/n0EAP/6AAD82wAA//YAAPzXAAAAAARWAKAEVgCgBFYAoARWAKAEVgBOBFYAUgRWAE4EVgBOBFYARgMQAEYEVgA1BFYANQRWAFAEVgAtBFYASAMQAC0EVgAlBFYAJQRWACUEVgAnBFYALwMQACUEVgAdBFYAFwRWADUEVgA1BFYALwMQACkEVgBQBFYATARWAEwEVgBMBFYAXgMQAEwEVgCgBFYAoARWAKAEVgCgBFYAUARWAEwEVgBGBFYATARWAEwDEABMBFYALwRWADkEVgA/BFYAPwRWAD8DEAA/BFYANQRWADUEVgA1BFYANQRWADUDEAA1BFYATARWAEwEVgBMBFYATARWAEwDEABoBFYATARWAEYEVgBMBFYATARWAEwDEABMBFYAoARWAKAEVgCgBFYAoARWAFYEVgBWBFYAWARWAFYEVgBWAxAAXARWADcEVgA3BFYANwRWADcEVgA3AxAANwRWAEgEVgBGBFYARgRWAEYEVgBGAxAARgRWAIEEVgCBBFYAOQRWADkEVgA5AxAAOQRWAJEEVgCRBFYAkQRWAJEEVgCRAxAATARWAKAEVgCgBFYAoARWAKAEVgBMBFYATARWAEwEVgBMBFYAUAMQAFAEVgAvBFYANQRWADUEVgAXBFYAHQMQACkEVgAvBFYAJwRWACUEVgAlBFYAJQMQACUEVgBIBFYALQRWAFAEVgA1BFYANQMQAC0EVgBGBFYATgRWAE4EVgBSBFYATgMQAEYEVgCgBFYAoARWAKAEVgCgBS0AXAUtAFwFLQBcBS0AXAUtAFwFLQBcBS0AXAUtAFwDQv/pA0L/6QNC/+kDQv/pA0L/3gNC/94DQv/eA0L/3gUpAI8FKQCPBSkAjwUpAI8FKQCPBSkAjwUpAI8FKQCPA0L/6ANC/+gDQv/WA0L/1gUpAI8FKQCPBSkAjwUpAI8GFwC4BoEAuAYMAK4FOQCaAzMARgMzAEYDMwBGAzMARgAA/rQAAP6NAAD+rgAA/p4AAP6eAAD+ngAA/q4AAP6uAAD+iwAA/q4AAP6eAAD+rgAA/q4DUgA9A1IAPQNSAFYDUgBWA1IASgNSAEoDUgBKBJ4AOQgjABAHDAAABvAAuAbjAKAHHf/2Bz0AAAVgALgE9ACgCOwAEAcCAAAJOQC4B1AAoAaRALgFxwCgBroAuAXXAKAEngCuBJMACgTNAMEAAAAAAhQAAAIUAAAAAPyoAAD+PAJMAJsGwwAoBsMAKAkeACgEHwAABB8AAAStAAAGoAAAB1gAAAYPAAAE3QAABN0AAATdAAAE3QAACR4AKAkeACgJHgAoCR4AKAaWAAAHKwAABOEAAAUpAAAGIAAABYYAAAYUAAAGiAAABnUAAAaiAAAEUwAABPIAAAWCAAAE0QAABmIAAAT+AAAFvgBVBHcAAAV+AFcE8gAABPIAAAToAAAGkgAABPYAAAYwAFUFLAAABRUAAAOCAAADggAABegAAAZwAAAGcAAABNgAAAYAAEkFAQAABfsAAASPAAAAAP0nBDgAKwJbAAACWwAAAlv96gAA/CIAAP0EAAD9xwAA/ccAAPyoAAD78wAA/H8AAPxbAlv+7gJb/k4CW/7aAlv+tgAA/jQIHQAoAAD+SQAA/G0AAP2KAAD+SQaWAAAHKwAABOEAAAaIAAAFggAABNEAAAaSAAAFFQAAB1gAAAYPAAAAAPvxAAD78QMwAWwFGwFsBJAAbASQANoEkACgBJAAuASRAHgEkACHBJAApwSQADMEkACKBJAAbgOQAIQCoADJBsMAKAThAAAGdAAABAMAagWCAAAE9gAAAq4AtQAA/8oAAP7VBBQAagbDACgGwwAoCR4AKAQfAAAEHwAABK0AAAagAAAHWAAABg8AAATdAAAE3QAABN0AAATdAAAJHgAoCR4AKAkeACgJHgAoB1gAAAYPAAAGwwAoBSkAAAYgAAAFhgAABhQAAAZ1AAAGogAABFMAAATyAAAGYgAABP4AAAW+AFUEdwAABX4AVwToAAAE9gAABjAAVQUsAAAF6AAABNgAAAYAAEkFAQAABfsAAASPAAAGWABLBbMAAAAA/cgAAPwcBSIAAAV0AAACgwAAAzYAAAYgAAADfQAABhQAAATRAAAEvgAABMcAAARTAAAE8gAABYIAAATRAAAEEgAAAyYAAAOaAFUEdwAAA0oAVwMbAAACwQAABR8AAAM/AAAERgBVA0IAAALxAAADUAAABFoAAAY5AAADIQAAA6wASQM0AAAD/QAABEAAAAQ2AEsD2wAABSIAAAV0AAACgwAAAzYAAAYgAAADfQAABhQAAATRAAAEvgAABMcAAARTAAAE8gAABYIAAATRAAAEEgAAAyYAAAOaAFUEdwAAA0oAVwMbAAACwQAABR8AAAM/AAAERgBVA0IAAALxAAADUAAABFoAAAY5AAADIQAAA6wASQM0AAAD/QAABEAAAAa2AAAHKwAABOEAAAUpAAAGIAAABYYAAAYUAAAG7AAABvYAAAbyAAAEUwAABPIAAAWCAAAE0QAABmIAAATPAAAFvgBVBHcAAAV+AFcE8gAABOgAAAaSAAAFFQAABjAAVQUsAAAFFQAAA4L/4QYkAAAGcAAABPYAAAYnABkFAQAABlUAAAS3AAAG5ABLBjoAAAa2AAAHKwAABOEAAAUpAAAGIAAABYYAAAYUAAAG7AAABvYAAAbyAAAEUw
AABPIAAAWCAAAE0QAABmIAAATPAAAFvgBVBHcAAAV+AFcE8gAABOgAAAaSAAAFFQAABjAAVQUsAAAFFQAAA4L/4QYkAAAGcAAABPYAAAYnABkFAQAABlUAAAS3AAAFQgAABXQAAAMFAAADNgAABiAAAAYhAAADfQAABhQAAAU1AAAFPwAABTsAAARTAAAEUwAABPIAAATyAAAFggAABYIAAATRAAAE0QAABIUAAAMOAAADmgBVBHcAAANKAFcDPAAAAsEAAAUfAAADXQAABHkAVQN1AAAC8QAAA1D/6gSoAAAGOQAAA0AAAARwABkDNAAABJ4AAAS3AAAFLQBLBIMAAAVCAAAFdAAAAwUAAAM2AAAGIAAABiEAAAN9AAAGFAAABTUAAAU/AAAFOwAABFMAAARTAAAE8gAABPIAAAWCAAAFggAABNEAAATRAAAEhQAAAw4AAAOaAFUEdwAAA0oAVwM8AAACwQAABR8AAANdAAAEeQBVA3UAAALxAAADUP/qBKgAAAY5AAADQAAABHAAGQM0AAAEngAABLcAAASPAAAEjwAABI8AAASPAAAEtwAABLcAAASPAAAEjwAABLcAAAS3AAAE3wAABN8AAATfAAAE3wAABQYAAAbiAAAEdwAABHcAAAR3AAAFBgAABuIAAAR3AAAEdwAABHcAAAJb/eoCW/3qAlv96gAA+7YAAPutAAD7rQAA+/MAAPvzAAD78wAA/H8AAPx/AAD8fwAA/FsAAPxbAAD8WwJb/ekCW/4IAlv+CAJb/k4CW/5OAlv+TgJb/toCW/7aAlv+2gJb/rYCW/62Alv+tgAA/cgGwwAoBB8AAATdAAAE3QAABN0AAAkeACgJHgAoCR4AKAkeACgGwwAoBsMAKAQfAAAE3QAABN0AAATdAAAJHgAoCR4AKAkeACgJHgAoBsMAKAeuAAALRgAACuwAAARTAAAEUwAABJAAAASQAAAI7gAABPIAAAnPAAAFggAABYIAAAWCAAAKPQAABNEAAAmdAAAF3AAABAQAAAZBAAAEdwAABZgAAAbxAAAFhAAABuYAAAmLAAAErQAABh8AAAYvAAAERAAABRoAAAflAAAGJwAZBHAAGQYnABkEcAAZCUUAGQYnABkFAQAABQEAAAUQAAAFEAAABpoAAAYOAAAHPwAABycAAAbbAAAGcgAABg8AAAYPAAAGDwAABg8AAAlsABkF7QAAA6cAAAXtAAADpwAAAlsAAAJbAAACWwAAAlsAAAJbAAACWwAAAlsAAAJbAAACWwAAAlsAAAJbAAACWwAAAlsAAAJbAAACWwAAAlsAAAJbAAACWwAAAlsAAAJbAAACWwAAAlsAAAJbAAACWwAAAlsAAAJbAAACWwAAAlsAAAJbAAACWwAAAlsAAAJbAAACWwAAAlsAAAJbAAACWwAAAlsAAAJbAAACWwAAAlsAAAJbAAACWwAAAlsAAAJbAAACWwAAAlsAAAJbAAACWwAAAAAAAAJb/ZsCW/0CAlv8LgJb/ZsCW/0CAlv8LgJb/ZsCW/0CAlv8LgJb/ZsCW/0CAlv8LgAA+ysAAPsrAAD8ggAA+3kAAPt5AAD8LQAA/C0AAPwtAAD8LQAA+/EAAPqxAAD6sQAA+rEAAPvxAAD6sQAA/OsGIQAABhQAAARTAAAE8gAABYIAAATRAAAGcAAABg8AAAYhAAAGFAAABFMAAATyAAAFggAABNEAAAZwAAAGDwAABAgAGQAA++QAAPvkAAD7GAAA/FgAAPxYAAD7GAAA/FgAAPxYAAD7GAAA/FgAAPwtAAD8LQAA/FgAAPxYAAD7GAAA/CIAAP0EAAD9xwAA/ccAAPvxAAD78QAA/jQErQAAByoAAAAA/SAAAPutAAD78wAA/H8AAPxbAlv97QJb/k4CW/7aAlv+tgJb/eoCW/2bAlv9AgJb/C4AAP09AzkBDwReAN8FUwBPBzIAUgK4AN8DFwCHAxcAaQTbAIgEkABsAlIAPwLtAGoCSAB1A8YASARoAD8EaABcBGgATgRoADkEaAAEBGgAVgRoAEwEaAA3BGgASARoAD8ClwCcApcAZgSQAGwEkABsBJAAbARaAH8DHgDWA8YASAMeAGQEkAAtA0r//ANoAE4EUQFcA2gAZASQAGwC7QBqBAAAUggAAFICrwCfAq4AtQSBAJ8EgQC1BtcAdQSQAIEEkABsBJAAbASQAHwAAAAA/ScAAAAAAAEAAwABAAAADAAEBvgAAAGAAQAABwCAAAAADQAmAD8AWgBfAHoAfgCgAK4ArwDWANcA9gD3AWEBYwF/AZEBkgGfAaEBrgGwAe8B8AH5Af8CFwIbAjYCNwK7ArwCxQLJAtcC3QLyAvMC/wMDAw4DDwMiAyMDbwN1A34DigOMA6EDzgPWA/8EAAQMBA0ETwRQBFwEXwSGBJEFEwUdBScJOQlNCVQJcgl/HcoeAR49Hj8efx6FHpsenh7xHvMe+R8VHx0fRR9NH1cfWR9bH10ffR+0H8Qf0x/bH+8f9B/+IAogDyAiICYgLyAwIDQgOiA8ID4gRCBeIHAgeSB/IJQgqSCsILUguiDwIQUhEyEXISIhJiEuIU4hVCFeIYQhlSGoIgIiBiIPIhIiFSIaIh8iKSIrIkgiYSJlIwIjECMhJQAlAiUMJRAlFCUYJRwlJCUsJTQlPCVsJYAlhCWIJYwlkyWhJawlsiW6JbwlxCXMJc8l2SXmJjwmQCZCJmAmYyZmJmsmbyxtLHcuF6chp4z7BP4j/v///f//AAAAAAANACAAJwBAAFsAYAB7AKAAoQCvALAA1wDYAPcA+AFiAWQBgAGSAZMBoAGiAa8BsQHwAfEB+gIAAhgCHAI3AjgCvAK9AsYCygLYAt4C8wL0AwADBAMPAxADIwMkA3QDegOEA4wDjgOjA9AD1wQABAEEDQQOBFAEUQRdBGAEhwSSBRQFHgkBCTwJUAlYCXsdAB3+HgIePh5AHoAehh6eHqAe8h70HwAfGB8gH0gfUB9ZH1sfXR9fH4Afth/GH9Yf3R/yH/YgACALIBIgJiAqIDAgMiA5IDwgPiBEIF4gaiB0IH8gkCCgIKsgrSC5IPAhBSETIRYhIiEmIS4hTSFTIVshhCGQIagiAiIGIg8iESIVIhkiHiIpIisiSCJgImQjAiMQIyAlACUCJQwlECUUJRglHCUkJSwlNCU8JVAlgCWEJYgljCWQJaAlqiWyJbolvCXEJcolzyXYJeYmOiZAJkImYCZjJmUmaiZvLGAscS4XpxeniPsB/iD+///8//8JawlfAAALzv/jC7P/4wuYCM3/wgto/8ILSP/CCyn/wgIc/8ICAP+wAf8AvAH9AK8B+wBeAfr/SQH0AAAB8AAAAe8HIgHuAAAB6/52AeX/ZQHkAAAB4QBkAeD/QQHfAdsB1/3Q/c/9zv3NAAABg/5l/Zv+Wf2a/hf9mQAA/gkAAP4GAAAEQABtAGsAaQBmAF7oiAAA6FPkFehR43roS+hJ5HrjDuR45+jn5ufk5+Ln4Off597n3efc59rn2efY59bn1efT59LiPwAAAADr+Oeu4
eEAAOHb4drnoeHT54LndwAA4ZnnWgAAAADnSQAA5xDhGOELAADg/uD74PTmteax4MjmguZ35mXgJeAi4BoAAOX5AAAAAOXo4APf5wAA383lEeUE5PXjF+MW4w3jCuMH4wTjAeL64vPi7OLl4tLiv+K84rnituKz4qfin+Ka4pPikuKLAADig+J74m/iHOIZ4hjh++H54fjh9eHy3ALb/9pgYWFg+wAACmsNJAJQAAEAAAAAAXwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAVQAAAAAAAAAAAAAAVAAAAAAAAAAAAAAAUwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATwAAAAAAAAAAAAAAAAAAAE6AAABPAAAAU4AAAAAAAAAAAAAAAAAAAFSAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEkASwAAAAAAAABRgAAAAAAAAAAAAAAAAE+AAAAAAFEAVYAAAFWAAAAAAAAAVIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABPAAAATwBPgAAAAAAAAE6AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADqAAAAAAAAAAAJbQvxC/IL8wAHC/QACQFJAUoBJAElAUsBTAS0AU0CYAJhBOQCYgVWAlACUQVXBVgFWQJSAmgB9gH3BYMCkAKRApICkwKUApUClgKXAfgB+QlYCVkJWglbCVwJXQWEBYUFhgWHBlMGVAJVAlYMIwnfCeAH0wfUB9UMGAwZAgQH1gIFDBoMGwIIAgkMHAwdAgwH1wINAg4CDwISAhMH3gI7AjwH6AI9Aj4H6QfvB/AH8QIZAhoH8gfzAhsH9Af1A3ICHAwiCWkCHwgBAioMIQgPAiwCLQgQAjAIEgIzCFAJ4QI0AjUCWgJbQEdbWllYVVRTUlFQT05NTEtKSUhHRkVEQ0JBQD8+PTw7Ojk4NzY1MTAvLi0sKCcmJSQjIiEfGBQREA8ODQsKCQgHBgUEAwIBACwgsAFgRbADJSARRmEjRSNhSC0sIEUYaEQtLEUjRmCwIGEgsEZgsAQmI0hILSxFI0YjYbAgYCCwJmGwIGGwBCYjSEgtLEUjRmCwQGEgsGZgsAQmI0hILSxFI0YjYbBAYCCwJmGwQGGwBCYjSEgtLAEQIDwAPC0sIEUjILDNRCMguAFaUVgjILCNRCNZILDtUVgjILBNRCNZILAEJlFYIyCwDUQjWSEhLSwgIEUYaEQgsAFgIEWwRnZoikVgRC0sAbELCkMjQ2UKLSwAsQoLQyNDCy0sALAoI3CxASg+AbAoI3CxAihFOrECAAgNLSwgRbADJUVhZLBQUVhFRBshIVktLEmwDiNELSwgRbAAQ2BELSwBsAZDsAdDZQotLCBpsEBhsACLILEswIqMuBAAYmArDGQjZGFcWLADYVktLIoDRYqKh7ARK7ApI0SwKXrkGC0sRWWwLCNERbArI0QtLEtSWEVEGyEhWS0sS1FYRUQbISFZLSwBsAUlECMgivUAsAFgI+3sLSwBsAUlECMgivUAsAFhI+3sLSwBsAYlEPUA7ewtLLACQ7ABUlghISEhIRtGI0ZgiopGIyBGimCKYbj/gGIjIBAjirEMDIpwRWAgsABQWLABYbj/uosbsEaMWbAQYGgBOlktLCBFsAMlRlJLsBNRW1iwAiVGIGhhsAMlsAMlPyMhOBshEVktLCBFsAMlRlBYsAIlRiBoYbADJbADJT8jITgbIRFZLSwAsAdDsAZDCy0sILADJUVQWIogRYqLRCEbIUVEWS0sIbCAUVgMZCNki7ggAGIbsgBALytZsAJgLSwhsMBRWAxkI2SLuBVVYhuyAIAvK1mwAmAtLAxkI2SLuEAAYmAjIS0sS1NYirAEJUlkI0VpsECLYbCAYrAgYWqwDiNEIxCwDvYbISOKEhEgOS9ZLSxLU1ggsAMlSWRpILAFJrAGJUlkI2GwgGKwIGFqsA4jRLAEJhCwDvaKELAOI0SwDvawDiNEsA7tG4qwBCYREiA5IyA5Ly9ZLSxFI0VgI0VgI0VgI3ZoGLCAYiAtLLBIKy0sIEWwAFRYsEBEIEWwQGFEGyEhWS0sRbEwL0UjRWFgsAFgaUQtLEtRWLAvI3CwFCNCGyEhWS0sS1FYILADJUVpU1hEGyEhWRshIVktLEWwFEOwAGBjsAFgaUQtLLAvRUQtLEUjIEWKYEQtLEUjRWBELSxLI1FYuQAz/+CxNCAbszMANABZREQtLLAWQ1iwAyZFilhkZrAfYBtksCBgZiBYGyGwQFmwAWFZI1hlWbApI0QjELAp4BshISEhIVktLLACQ1RYS1MjS1FaWDgbISFZGyEhISFZLSywFkNYsAQlRWSwIGBmIFgbIbBAWbABYSNYG2VZsCkjRLAFJbAIJQggWAIbA1mwBCUQsAUlIEawBCUjQjywBCWwByUIsAclELAGJSBGsAQlsAFgI0I8IFgBGwBZsAQlELAFJbAp4LApIEVlRLAHJRCwBiWwKeCwBSWwCCUIIFgCGwNZsAUlsAMlQ0iwBCWwByUIsAYlsAMlsAFgQ0gbIVkhISEhISEhLSwCsAQlICBGsAQlI0KwBSUIsAMlRUghISEhLSwCsAMlILAEJQiwAiVDSCEhIS0sRSMgRRggsABQIFgjZSNZI2ggsEBQWCGwQFkjWGVZimBELSxLUyNLUVpYIEWKYEQbISFZLSxLVFggRYpgRBshIVktLEtTI0tRWlg4GyEhWS0ssAAhS1RYOBshIVktLLACQ1RYsEYrGyEhISFZLSywAkNUWLBHKxshISFZLSwgsAJUI7AAVFtYsICwAkNQsAGwAkNUW1ghISEhG7BIK1kbsICwAkNQsAGwAkNUW1iwSCsbISEhIVlZLSwgsAJUI7AAVFtYsICwAkNQsAGwAkNUW1ghISEbsEkrWRuwgLACQ1CwAbACQ1RbWLBJKxshISFZWS0sIIoII0tTiktRWlgjOBshIVktLACwAiURsAIlSWogsABTWLBAYDgbISFZLSwAsAIlEbACJUlqILAAUViwQGE4GyEhWS0sIIojSWSKI1NYPBshWS0sS1JYfRt6WS0ssBIASwFLVEItLLECAUKxIwGIUbFAAYhTWlixAgBCuRAAACCIVFiyAgECQ2BCWbEkAYhRWLkgAABAiFRYsgICAkNgQrEkAYhUWLICIAJDYEIASwFLUliyAggCQ2BCWRu5QAAAgIhUWLICBAJDYEJZuUAAAIBjuAEAiFRYsgIIAkNgQlm5QAABAGO4AgCIVFiyAhACQ2BCWbEmAYhRWLlAAAIAY7gEAIhUWLICQAJDYEJZuUAABABjuAgAiFRYsgKAAkNgQllZWVlZWbEAAkNUWLECAUJZLSxFGGgjS1FYIyBFIGSwQFBYfFloimBZRC0ssAAW
sAIlsAIlAbABIz4AsAIjPrEBAgYMsAojZUKwCyNCAbABIz8AsAIjP7EBAgYMsAYjZUKwByNCsAEWAS0ssICwAkNQsAGwAkNUW1ghIxCwIBrJG4oQ7VktLLBZKy0sihDlLUCGCVtQWlU/Wk9aAloBWFVZUFhVMFhAWFBYsFgEV1BWVSBWQFYCUFbwVgJWAVRVVVBUVXBUAR9UATBUQFSAVNBU4FQFME0BTQJOVU9QTlUzTgFOAUtVSlBJVUkBS1VHZEZVP0avRgJGAUtVTFBLVR9LAQ9LP0uvSwNTUFJVO1IBUgFQVVFQUFW4/8BA/yUMEUYhMyBVACAB7yABkCABfyABIAEeVR8zA1UfHgEPHj8erx4Dfl3/H/h9AXh4AXdzQR92c0EfdXMjH3RzKx/ocwF3cwHZc+lzAhkzGFUHMwNVBgP/H4aIAXmGiYaZhgN2gQHJegE5cQGJcZlxAtlw6XACd24Bx24BbGkjH2tpKx9qaTYfZml2aQLYaQFnaQETMxJVBQEDVQQzA1UfAwEPAz8DrwMDBhWHAXmFAUWExYQCqoS6hAJFhFWEAgCCAVqCaoICGIITFkZfe+97/3sDhnKWcgKWaAFaZ2pnAhhnFxpGCWaZZqlmA1lmaWbpZvlmBAllGWUCaWUBZF0ZH0BaWWMBeWOJYwInYwFPYl9i72L/YgRhXTMfYF9QH19dIh9eXTwfqV25XQIcZBtVFjMVVREBD1UQMw9Vrw/PDwIwDwECAQBVAWQAVW8AfwCvAO8ABBAAAYAWAQUBuAGQsVRTKytLuAf/UkuwCVBbsAGIsCVTsAGIsEBRWrAGiLAAVVpbWLEBAY5ZhY2NAEIdS7AyU1iwYB1ZS7BkU1iwQB1ZS7CAU1iwEB2xFgBCWXNzKytzcysrKytzKysrK3Nzc3Qrc3RzdCtzc3NzK3R1c3N0c3Nec3QrKytzc3QrKytzdHNzdHNzc3MrKytzdHQrKysrc3Mrc3QrK3Nzc3QrKysrcytzdCsrcysrKytzKytzc3R0KytzdCtzKytzKxheAAYUAA8APAW2ABcAdQW2ABcAAAAAAAAAAAAAAAAAAAReABcAewAA/+wAAAAA/+wAAAAA/+wAAP4U//YAAAW2ABP8lP/t/lb+FP68/1QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAATMACQAAAPYADgW2ABD+tPz+//T/YP/0AwIAEAGH//IDRgAOAjX/9AK/AAwAAAD2AOsA3gDlAQAA0wErAR0AwADLAMAArAECAQoA9ADmAL8AsACNAJcAxwC3APoA7QDjAQQA/gDbALIApADTAAABVAD2AT8AAADmAOUAowD8ALkAxwE0AOYAAAAAAAAAAABIAAAASAAAAEgAAABIAAAAyAAAARQAAAIEAAADEAAAA/AAAAUMAAAFQAAABZgAAAXwAAAGaAAABuQAAAcwAAAHbAAAB7AAAAfkAAAIaAAACMAAAAlUAAAKSAAACtwAAAt8AAAMNAAADIAAAA1gAAAOHAAADpAAAA8QAAAPYAAAD8QAABAUAAAQ1AAAEfAAABJgAAATVAAAE9AAABRAAAAU5AAAFVwAABYEAAAWoAAAFwgAABdgAAAX1AAAGBQAABicAAAZCAAAGYwAABoYAAAaxAAAG2QAABwYAAAcZAAAHNAAAB0kAAAduAAAHiwAAB6AAAAe6AAAHzAAAB9kAAAfqAAAH+wAACAYAAAgXAAAISAAACHAAAAiNAAAItgAACOgAAAkLAAAJOgAACVkAAAlzAAAJlQAACbQAAAnAAAAJ6gAACgcAAAopAAAKUgAACn4AAAqYAAAKwwAACuQAAAsBAAALFQAACzkAAAtVAAALdAAAC40AAAu6AAALxgAAC/MAAAwZAAAMGQAADDkAAAxiAAAMjQAADMIAAAzxAAANBQAADUQAAA1iAAANowAADdEAAA3wAAAOBwAADgsAAA5SAAAOZQAADoMAAA6kAAAOwwAADvUAAA8GAAAPKQAAD0EAAA9UAAAPcAAAD4UAAA+iAAAPwQAAD8wAAA/XAAAP4gAAEBQAABAgAAAQLAAAEDgAABBEAAAQUQAAEIMAABC6AAAQxAAAENAAABDcAAAQ6AAAEPUAABEBAAARDQAAERkAABEmAAARYAAAEWwAABF4AAARhAAAEZAAABGcAAARqQAAEckAABH9AAASCQAAEhUAABIhAAASLgAAEjoAABJdAAASmAAAEqIAABKrAAAStQAAEr8AABLJAAAS0wAAEx8AABMpAAATMwAAEzwAABNFAAATTwAAE1kAABNjAAATbQAAE3cAABO2AAATwAAAE8oAABPUAAAT3gAAE+cAABPxAAAUJgAAFFoAABRkAAAUbgAAFHcAABSBAAAUigAAFLYAABTAAAAUzAAAFNYAABTiAAAU7AAAFPYAABUAAAAVDAAAFRUAABUhAAAVKgAAFTYAABVAAAAVTAAAFVUAABVhAAAVawAAFW8AABWnAAAVswAAFbwAABXIAAAV0QAAFd0AABXnAAAV8QAAFfsAABYHAAAWEAAAFhwAABYlAAAWMQAAFjoAABZGAAAWUAAAFloAABZkAAAWcAAAFnwAABa2AAAW4wAAFu8AABb5AAAXBQAAFw8AABcbAAAXJQAAFy8AABc4AAAXRAAAF1AAABdaAAAXZQAAF3EAABd7AAAXhQAAF48AABewAAAXvAAAF8gAABfSAAAX3AAAF+gAABfyAAAX/gAAGAgAABgmAAAYQAAAGEwAABhWAAAYYAAAGGoAABh2AAAYfwAAGIkAABivAAAY1AAAGOAAABjpAAAY9QAAGP8AABkMAAAZFgAAGVcAABmgAAAZrAAAGbUAABm/AAAZyQAAGdUAABnfAAAZ6wAAGfQAABoAAAAaCQAAGhMAABodAAAaKQAAGjIAABo7AAAaRQAAGlEAABpeAAAafgAAGqwAABq4AAAawQAAGs0AABrXAAAa4wAAGuwAABr6AAAbBAAAGxEAABsbAAAbJQAAGy8AABs7AAAbRQAAG1EAABtaAAAbZwAAG3MAABt8AAAbiAAAG5IAABueAAAbpwAAG7wAABvlAAAcKQAAHIAAAByMAAAclgAAHKIAAByrAAActQAAHL8AABzVAAAc6wAAHPkAAB0RAAAdHwAAHTwAAB1SAAAddAAAHY4AAB2fAAAdxgAAHdcAAB3cAAAd7wAAHgIAAB4TAAAeJgAAHjcAAB5KAAAeVQAAHlkAAB5dAAAebgAAHnIAAB52AAAeegAAHn4AAB60AAAeuAAAHrwAAB7TAAAe1wAAHtsAAB8EAAAfCAAAHxsAAB8fAAAfQQAAH0UAAB9JAAAffwAAH4MAAB+rAAAf2QAAH+YAAB/zAAAf/AAAIAUAACAOAAAgGAAAICMAACBVAAAgjgAAIKsAACDdAAAhGAAAIUIAACFgAAAhlwAAIawAACGwAAAh2QAAId0AACHzAAAiLAAAIjAAACJTAAAieQAAIp0AACLCAA
Ai3gAAIvwAACMtAAAjXAAAI4UAACO2AAAjwAAAI8oAACPTAAAj3AAAI+YAACPzAAAkHgAAJCoAACRgAAAkZAAAJGgAACR1AAAkeQAAJLIAACTtAAAlDgAAJRoAACUmAAAlPwAAJUMAACVvAAAlcwAAJXcAACWbAAAlnwAAJcQAACYAAAAmGwAAJicAACZBAAAmXgAAJmIAACZmAAAmagAAJm4AACZyAAAmdgAAJnoAACaZAAAmnQAAJqEAACa6AAAm1gAAJvAAACcPAAAnPAAAJ2wAACeUAAAnyAAAKAYAACgtAAAoMQAAKGMAACicAAAorAAAKNEAACjVAAAo+QAAKTUAAClPAAApWQAAKXMAACmOAAApsAAAKdEAACnVAAAp6QAAKe0AACnxAAAqBAAAKggAACo7AAAqPwAAKlkAACp1AAAqjgAAKq0AACrXAAArBAAAKyoAACtZAAArjgAAK7QAACu+AAAr9gAAK/8AACwvAAAsMwAALDcAACxBAAAsRQAALHkAACysAAAs2QAALOMAACzsAAAtBQAALRkAAC0vAAAtOwAALUUAAC1RAAAtWwAALWgAAC1yAAAtfgAALYgAAC2VAAAtogAALa0AAC3EAAAt1QAALeYAAC35AAAuCgAALiQAAC49AAAuWQAALngAAC6pAAAuugAALt8AAC8rAAAvLwAALzMAAC9FAAAvVwAAL2IAAC9vAAAvkQAAL7gAAC/yAAAwMgAAMIEAADC5AAAw5wAAMSUAADFMAAAxUAAAMXoAADGGAAAxkgAAMZ4AADGqAAAx2gAAMfYAADIKAAAyKAAAMjwAADJWAAAyjQAAMqcAADK2AAAy3wAAMvoAADMVAAAzLgAAMzkAADNDAAAzWAAAM20AADN9AAAzjAAAM50AADO/AAAz4wAAM/UAADQpAAA0KQAANCkAADQpAAA0KQAANCkAADQpAAA0KQAANCkAADQpAAA0KQAANCkAADQpAAA0KQAANNoAADUHAAA1EQAANRUAADVZAAA1dwAANawAADW4AAA1wgAANcwAADXWAAA14AAANgEAADYgAAA2MAAANj8AADZwAAA2oQAANswAADb4AAA2/QAANwIAADcHAAA3HwAANysAADc3AAA3QwAAN00AADdXAAA3lAAAN7sAADfyAAA4JAAAOGUAADicAAA4ywAAOPMAADkyAAA5aAAAOZYAADnMAAA6GAAAOmIAADq8AAA7FAAAOxgAADscAAA7VAAAO4kAADumAAA7wgAAO88AADvaAAA8FgAAPCAAADxcAAA8lQAAPPcAAD1WAAA9YgAAPWwAAD2OAAA9rgAAPdMAAD3xAAA+DwAAPiYAAD4+AAA+vAAAPw0AAD8+AAA/bgAAP6YAAD/fAABAEAAAQEUAAEBiAABAfwAAQK8AAEDaAABBBQAAQTEAAEE7AABBRQAAQWUAAEGGAABBrgAAQdMAAEH9AABCJwAAQkkAAEJoAABCmAAAQsIAAELwAABDFwAAQ0oAAEN4AABDvwAARAQAAEQOAABEGAAARDMAAEROAABEUgAARGwAAESQAABEtAAARNkAAET9AABFHQAART4AAEVkAABFiAAARbIAAEXZAABF9gAARhEAAEZQAABGigAARs4AAEcLAABHDwAARxsAAEclAABHUgAAR30AAEejAABHxgAAR/kAAEglAABIVwAASIIAAEinAABIzwAASPoAAEklAABJKQAASTUAAEk+AABJSwAASVUAAElZAABJXQAASWkAAElyAABJoAAASc8AAEncAABJ5gAASfMAAEn9AABKCgAAShQAAEo9AABKZgAASnIAAEp7AABKiAAASpIAAEqfAABKqQAASuEAAEsWAABLIwAASy0AAEs6AABLRAAAS1AAAEtZAABLZgAAS3AAAEt9AABLhwAAS5QAAEueAABLtgAAS84AAEvbAABL5QAAS/EAAEwiAABMTwAATHwAAEyrAABM2AAATPYAAEz6AABNLgAATWIAAE2VAABNyQAATfYAAE4oAABOUwAATnoAAE6yAABO4QAATwwAAE81AABPWAAAT3gAAE+xAABPtQAAT+EAAFALAABQFQAAUB8AAFArAABQNQAAUEIAAFBNAABQWgAAUGUAAFByAABQfQAAUMgAAFDTAABQ4wAAUPEAAFD+AABRCQAAURYAAFEhAABRLgAAUTkAAFGHAABRkgAAUaIAAFGwAABRugAAUcQAAFHQAABR2gAAUeYAAFHvAABR/AAAUgYAAFITAABSHQAAUioAAFI0AABSiwAAUpUAAFKlAABSswAAUr8AAFLJAABS0wAAUt0AAFLnAABS8QAAUv0AAFMHAABTFAAAUx8AAFMsAABTNwAAU0QAAFNPAABTnwAAU6kAAFO5AABTyAAAU9QAAFPdAABT6QAAU/MAAFP/AABUCQAAVBUAAFQeAABUKAAAVDIAAFQ8AABURgAAVFIAAFRcAABUaAAAVHIAAFR+AABUiAAAVJQAAFSeAABUqgAAVLMAAFS9AABUxwAAVNEAAFTbAABU5wAAVPEAAFT9AABVBgAAVRAAAFUoAABVRAAAVWEAAFWHAABVswAAVdMAAFXzAABWHgAAVkkAAFZfAABWcgAAVnwAAFaGAABWugAAVwYAAFcKAABXNQAAV18AAFeGAABXpgAAV9QAAFf9AABYAQAAWCsAAFhXAABYgwAAWLQAAFjdAABY4QAAWR0AAFlDAABZewAAWaYAAFnSAABZ5wAAWhwAAFpAAABaZwAAWoMAAFrBAABa6QAAWw0AAFsRAABbFQAAW0YAAFt2AABbpwAAW9sAAFwHAABcMgAAXFsAAFxfAABcigAAXLcAAFzVAABc/gAAXRoAAF1IAABdagAAXYkAAF2xAABd5QAAXhIAAF4WAABePgAAXmoAAF6hAABe0wAAXvsAAF8jAABfUQAAX3YAAF+CAABfiQAAX7MAAF+3AABfxwAAX9UAAF/jAABf7QAAX/cAAGABAABgCwAAYBUAAGAfAABgKwAAYDUAAGBBAABgSwAAYFcAAGBhAABgbQAAYHYAAGCEAABgjwAAYJ0AAGCoAABgtgAAYMEAAGDPAABg2gAAYQkAAGEXAABhIgAAYS8AAGE6AABhRgAAYVAAAGGUAABh1QAAYeEAAGHrAABh9wAAYgMAAGINAABiFwAAYicAAGI1AABiQQAAYksAAGJVAABiXwAAYmkAAGJ1AABifgAAYrQAAGLaAABi5gAAYvAAAGL9AABjBwAAYxMAAGMdAABjKgAAYzUAAGNBAABjSwAAY1gAAGNjAABjbwAAY3kAAGOGAABjkQAAY50AAGOnAABjtAAAY78AAGPLAABj1QAAY+IAAGPtAABj+QAAZAMAAGQHAABkPwAAZEsAAGRXAABkdgAAZHoAAGSuAABk4AAAZOoAAGTuAABk+gAAZQQAAGUOAABlGAAAZSYAAGUxAABlPgAAZUgAAGVUAABlXgAAZWgAAGVyAABlfgAAZYcAAGW1AABl9AAAZi0AAGZsAABmqwAAZt8AAGcaAABnU
QAAZ30AAGelAABn4AAAaAcAAGgkAABoQgAAaJkAAGjKAABozgAAaRYAAGlkAABpkwAAacEAAGn1AABqJwAAalQAAGp+AABqqAAAauIAAGsUAABrOgAAa2EAAGuRAABrsAAAa+UAAGwZAABsSgAAbHoAAGx+AABsuwAAbL8AAGzDAABtDQAAbUsAAG1sAABtpgAAbdUAAG4CAABuLAAAbmcAAG6GAABurAAAbtoAAG8GAABvCgAAbyIAAG9IAABvcQAAb4YAAG+4AABv4wAAcA8AAHBCAABwZwAAcI0AAHCnAABwqwAAcOgAAHEaAABxHgAAcTgAAHFSAABxdwAAcZIAAHG0AABxxwAAcdsAAHIBAAByJwAAcl0AAHJ6AABypQAAcsIAAHLzAABzHwAAc0EAAHNzAABznQAAc8AAAHPVAABz+AAAdBcAAHQsAAB0UgAAdIMAAHSHAAB0ywAAdOgAAHUGAAB1JAAAdUMAAHVNAAB1UQAAdY4AAHXDAAB1xwAAdfwAAHYbAAB2KwAAdmAAAHaJAAB2swAAdu0AAHc6AAB3jQAAd8gAAHgBAAB4TgAAeIwAAHi7AAB42QAAeRAAAHk0AAB5WwAAeYoAAHmmAAB5ygAAeeYAAHn+AAB6FgAAejkAAHpaAAB6fQAAepwAAHqgAAB6pAAAeqgAAHqsAAB6wgAAetgAAHrxAAB7CwAAexAAAHsVAAB7JgAAezcAAHs7AAB7QAAAe0UAAHtQAAB7WwAAe2gAAHt1AAB7hwAAe5MAAHuYAAB7nQAAe6EAAHulAAB7qQAAe7IAAHvIAAB7zAAAe/cAAHwDAAB8KwAAfEcAAHxjAAB8cQAAfIIAAHyUAAB8pQAAfLMAAHzBAAB80wAAfOIAAHznAAB86wAAfPsAAH0MAAB9HAAAfSwAAH0xAAB9NgAAfVMAAH1hAAB9ZgAAfXIAAH1+AAB9iQAAfZQAAH2jAAB9rwAAfb8AAH3EAAB9yQAAfeEAAH3mAAB96wAAffAAAH30AAB9+QAAff4AAH4DAAB+DQAAfhsAAH4+AAB+VAAAflkAAH5eAAB+YwAAfmgAAH51AAB+ggAAfpAAAH6eAAB+qwAAfrkAAH7JAAB+2AAAfucAAH79AAB/BwAAfxgAAH8pAAB/NAAAf04AAH9TAAB/WAAAf10AAH9oAAB/dQAAf5AAAH+fAAB/rQAAf7kAAH/FAAB/0wAAf94AAH/nAAB/7AAAf/EAAIACAACACwAAgBUAAIAfAACALwAAgD4AAIBOAACAaQAAgIAAAICcAACArwAAgLQAAIC5AACAvgAAgMUAAIDQAACA1QAAgOcAAIDsAACA9wAAgQQAAIEpAACBZQAAgZ4AAIGuAACBuwAAgnAAAIKDAACChwAAgqkAAIK4AACCywAAgt4AAIMAAACDBAAAgwgAAIMeAACDSQAAg2AAAIN1AACDgQAAg44AAIOaAACDtQAAg80AAIPaAACD+wAAhBkAAIQsAACEQwAAhFkAAIRtAACEkAAAhKsAAITMAACE4AAAhPYAAIUHAACFGQAAhSkAAIU6AACFPwAAhUMAAIVPAACFWQAAhV0AAIWdAACFrgAAhbsAAIW/AACF9QAAhh0AAIZDAACGZAAAhpAAAIazAACG0wAAhwcAAIc6AACHYwAAh4sAAIe9AACH7gAAiBkAAIhAAACIZwAAiIwAAIi/AACI8wAAiSYAAIlYAACJjQAAicEAAInuAACKEQAAij4AAIp3AACKewAAin8AAIqDAACKhwAAiosAAIqPAACKkwAAipcAAIq5AACK1AAAiwYAAIsKAACLFQAAix8AAIs8AACLQAAAi0QAAItIAACLTAAAi2sAAIuhAACL8wAAjC4AAIxLAACMZwAAjJoAAIy+AACM9wAAjRQAAI0qAACNRgAAjWgAAI2IAACNowAAjcMAAI3hAACOAgAAjiEAAI5WAACOoAAAjs8AAI7nAACO/QAAjx0AAI9EAACPaAAAj3oAAI+VAACPtQAAj/cAAJAxAACQRgAAkGgAAJCCAACQrgAAkNkAAJD/AACRDwAAkSUAAJE4AACRPAAAkWEAAJF9AACRmwAAkcgAAJH6AACSLgAAkkcAAJJnAACShgAAkqoAAJLKAACS4wAAkvgAAJMUAACTIwAAk0QAAJNeAACTeQAAk5YAAJPAAACT2wAAk/4AAJQQAACUKAAAlEoAAJRyAACUnQAAlL8AAJUDAACVJwAAlUsAAJV0AACVlwAAlcgAAJX3AACWIAAAljkAAJZWAACWfQAAlqAAAJa8AACW1wAAlu4AAJcEAACXKAAAl0QAAJdfAACXewAAl6IAAJe2AACX3AAAmAsAAJgmAACYUgAAmH0AAJimAACYvQAAmNUAAJjcAACY4wAAmOsAAJjyAACZFQAAmR0AAJkkAACZYwAAmaUAAJnrAACaJwAAmnwAAJq8AACbAAAAmzgAAJtoAACbogAAm+AAAJwYAACcSgAAnGcAAJyaAACc5wAAnQUAAJ0sAACdawAAnZcAAJ3OAACeAwAAnjsAAJ5rAACergAAntoAAJ70AACfLQAAn1kAAJ+OAACftgAAn+0AAKAdAACgQgAAoG4AAKCTAACg1AAAoQkAAKFFAAChgAAAoccAAKIOAACiSwAAonQAAKKeAACixQAAovEAAKMoAACjSwAAo2MAAKOTAACjyQAAo/oAAKQXAACkMwAApF0AAKR6AACkoQAApLQAAKTJAACk7gAApRoAAKUtAAClQAAApU8AAKV9AAClpgAApcgAAKXqAACmAwAApikAAKZWAACmhAAApp8AAKbGAACm9QAApxoAAKcyAACnUQAAp2YAAKd+AACnnwAAp8wAAKfvAACoHQAAqDsAAKhaAACogAAAqJsAAKiqAACouQAAqMcAAKjVAACo6wAAqQEAAKkWAACpKwAAqU0AAKlZAACpYwAAqW0AAKl3AACphQAAqZMAAKmjAACpsAAAqbwAAKnGAACp0AAAqdoAAKnoAACp9gAAqgAAAKoKAACqHQAAqi8AAKo9AACqRwAAqlUAAKpfAACqcQAAqoMAAKqWAACqqQAAqroAAKrHAACq0wAAqt8AAKrrAACq9AAAqwAAAKsMAACrFgAAqyAAAKstAACrOgAAq0QAAKtOAACrXQAAq2wAAKt+AACrkAAAq54AAKupAACrtQAAq8EAAKvLAACr1QAAq+MAAKvxAACr+wAArAUAAKwVAACsJQAArDMAAKxBAACsVAAArGYAAKxyAACsfAAArIYAAKyQAACsnAAArKYAAKywAACsugAArMgAAKzWAACs6QAArPsAAK0IAACtEgAArSAAAK0rAACtOQAArUMAAK1RAACtWwAArWcAAK1xAACtfQAArYcAAK2TAACtnQAAracAAK2xAACtwgAArdEAAK3fAACt7QAArfkAAK4DAACuDQAArhcAAK4kAACuLgAArjsAAK5GAACuVwAArmYAAK5yAACugAAArooAAK6UAACuogAArrAAAK7DAACu1QAAruQAAK7zAACvBQAA
rxgAAK8qAACvPAAAr0kAAK9TAACvYgAAr20AAK95AACvggAAr4wAAK+WAACvogAAr6wAAK+2AACvwAAAr8wAAK/WAACv4wAAr+0AAK/5AACwAwAAsA8AALAYAACwIgAAsCwAALA6AACwSAAAsFYAALBhAACwawAAsHUAALB/AACwiwAAsLwAALDGAACw0AAAsNoAALDkAACw7gAAsPgAALECAACxDAAAsRgAALEkAACxMQAAsT4AALFLAACxWAAAsWgAALF4AACxggAAsYwAALGWAACxoAAAsaoAALG0AACxwAAAscwAALHZAACx5gAAsfMAALIAAACyCgAAshQAALIeAACyKAAAsjIAALI8AACyRgAAslAAALJcAACyaAAAsnUAALKCAACyjwAAspwAALKsAACyvAAAssUAALLPAACy2QAAsuQAALLuAACy+AAAswIAALMNAACzGQAAsyUAALMyAACzPwAAs0wAALNZAACzaQAAs3kAALODAACzjQAAs5cAALOhAACzqwAAs7UAALPBAACzzQAAs9oAALPnAACz9AAAtAEAALQLAAC0FQAAtB8AALQpAAC0MwAAtD0AALRHAAC0UQAAtF0AALRqAAC0dwAAtIcAALSRAAC0mwAAtKUAALSwAAC0ugAAtMQAALTOAAC02QAAtOUAALTxAAC0/gAAtQsAALUYAAC1JQAAtTUAALVFAAC1TwAAtVgAALViAAC1awAAtXUAALV+AAC1iAAAtZIAALWcAAC1pQAAta8AALW4AAC1wgAAtcwAALXaAAC16QAAtfgAALYHAAC2FQAAtiMAALYyAAC2QQAAtlIAALZjAAC2dQAAtocAALaZAAC2qwAAtr8AALbTAAC24gAAtvEAALcAAAC3DwAAtx4AALctAAC3PAAAt0sAALdcAAC3bQAAt38AALeRAAC3owAAt7UAALfKAAC33wAAt+4AALf9AAC4DAAAuBwAALgrAAC4OgAAuEkAALhZAAC4agAAuHsAALiMAAC4nQAAuK4AALi/AAC40wAAuOcAALjwAAC4+QAAuQcAALkRAAC5HwAAuSkAALk3AAC5QwAAuU8AALlaAAC5ZgAAuXAAALmIAAC5ngAAuaIAALmmAAC53QAAuewAALn2AAC6BAAAug4AALodAAC6KQAAujUAALpBAAC6TQAAulcAALp4AAC6mQAAussAALrVAAC63wAAuuoAALr1AAC6/wAAuwoAALsWAAC7IgAAuy4AALs6AAC7WwAAu3sAALutAAC7twAAu8EAALvMAAC71wAAu+EAALvrAAC79QAAvAAAALwMAAC8GAAAvCQAALwwAAC8PAAAvGIAALyIAAC8mwAAvKoAALy0AAC8wwAAvM0AALzcAAC86AAAvPQAAL0AAAC9DAAAvRYAAL0pAAC9QQAAvUoAAL1gAAC9cQAAvYEAAL2MAAC9kwAAva0AAL24AAC9wwAAvdIAAL3lAAC9+AAAvfgAAL3+AAC+AgAAvjMAAL5JAAC+XQAAvmsAAL58AAC+kAAAvqQAAL6/AAC+6QAAvxEAAL8ZAAC/IQAAvykAAL9DAAC/SwAAv4YAAL/LAAC//AAAwDEAAMB+AADAygAAwScAAMFTAADBlgAAwdoAAMIVAADCdQAAwrIAAML0AADDPAAAw20AAMOlAADDqgAAw+oAAMQ3AADEUQAAxFwAAMRnAADEawAAxIUAAMSaAADEtAAAxMkAAMTuAADFDQAAxTEAAMU1AADFOQAAxUcAAMVfAADFfQAAxZIAAMWgAADFuQAAxdEAAMXcAADF6AAAxfcAAMYFAADGFAAAxiIAAMY0AADGRAAAxlUAAMZmAADGfAAAxpQAAMamAADGwAAAxtQAAMbyAADHCwAAxx4AAMc7AADHVQAAx2kAAMeGAADHnwAAx7IAAMfOAADH6wAAyAMAAMgmAADIQQAAyFcAAMh6AADIlwAAyK0AAMjRAADI7gAAyQQAAMkoAADJTwAAyXAAAMmfAADJqwAAybcAAMnDAADJzwAAydsAAMq/AADMgAAAzjAAAM47AADOTQAAzlkAAM5sAADOdwAAzoIAAM6OAADOmgAAzqUAAM7GAADPWQAAz2sAAM+EAADPqgAAz8gAANATAADQTgAA0JMAANDFAADQ/wAA0SQAANFaAADRdQAA0YkAANGpAADR0wAA0gwAANI7AADSZwAA0pMAANLJAADS+gAA00UAANN5AADTfQAA06MAANPGAADT6wAA1AoAANQoAADUUAAA1G0AANSbAADUxgAA1PkAANUbAADVNgAA1WIAANV5AADVkwAA1a4AANXHAADV1AAA1eoAANYAAADWGwAA1jUAANY6AADWSgAA1lgAANZkAADWggAA1pkAANalAADWsQAA1tcAANb9AADXEAAA1yEAANczAADXSQAA118AANd0AADXiQAA154AANexAADXxQAA19kAANfrAADX/wAA2BIAANgkAADYOAAA2E0AANhhAADYdQAA2IYAANiYAADYrAAA2MEAANjXAADY6wAA2P4AANkQAADZJAAA2ToAANlQAADZZgAA2XcAANmIAADZmgAA2bAAANnGAADZ2wAA2e8AANoAAADaEwAA2igAANo9AADaUAAA2mQAANp5AADajgAA2qAAANqzAADaxQAA2tkAANruAADbAwAA2xgAANsrAADbPQAA21EAANtmAADbfAAA25EAANujAADbtAAA28YAANvcAADb8gAA3AcAANwbAADcLAAA3D0AANxSAADcZwAA3HwAANyQAADcogAA3LUAANzKAADc3wAA3PIAAN0GAADdGwAA3TAAAN1FAADdVgAA3WgAAN18AADdkQAA3acAAN29AADdzwAA3eAAAN3xAADeBwAA3h0AAN4yAADeRgAA3lcAAN5qAADefwAA3pQAAN6pAADevQAA3s8AAN7iAADe9QAA3woAAN8fAADfMgAA30QAAN9XAADfbAAA34EAAN+UAADfqAAA370AAN/TAADf6QAA3/sAAOAMAADgHQAA4DMAAOBJAADgXwAA4HMAAOCEAADglwAA4KsAAODBAADg1gAA4OoAAOD8AADhDQAA4SEAAOE1AADhSwAA4V8AAOFxAADhhAAA4ZgAAOGqAADhvgAA4dIAAOHkAADh9wAA4gsAAOIgAADiNQAA4kAAAOJLAADiVgAA4mEAAOJsAADidwAA4oIAAOKNAADilwAA4qEAAOKrAADitQAA4r8AAOLJAADi0wAA4t0AAOLoAADi8wAA4v4AAOMJAADjFAAA4x8AAOMqAADjNQAA40AAAONLAADjVgAA42EAAONtAADjeQAA44UAAOORAADjuAAA47wAAOPtAADkIwAA5FYAAOSJAADkvQAA5PAAAOUJAADlJwAA5UoAAOV9AADlrQAA5d0AAOX4AADmEwAA5kIAAOZiAADmiQAA5qMAAOa/AADm/QAA5zsAAOd5AADntgAA5+0AAOglAADoXQAA6JMAAOjCAADo7QAA6RwAAOlPAADpkwAA6eEAAOoFAADqKgAA6mc
AAOqcAADq5QAA6yIAAOs9AADrWAAA63wAAOuiAADrvgAA6/QAAOv/AADr/wAA6/8AAOv/AADsFwAA7CQAAOw5AADsQAAA7GUAAOyNAADsswAA7LoAAOzZAADtAwAA7TUAAO1iAADtaQAA7XAAAO2TAADtmgAA7aEAAO2oAADtrwAA7bYAAO3ZAADuBAAA7hYAAO41AADuYQAA7nsAAO6tAADuxgAA7vQAAO8TAADvLAAA70oAAO9tAADvkAAA76kAAO++AADv4AAA8AMAAPAqAADwPAAA8EMAAPBaAADwfAAA8JoAAPC6AADwzwAA8OkAAPEBAADxCAAA8SUAAPFQAADxVwAA8W8AAPGWAADxrwAA8c4AAPHyAADx/QAA8hoAAPIkAADyKQAA8j8AAPJUAADyaQAA8noAAPKVAADypAAA8roAAPLJAADy4QAA8ugAAPLvAADy9gAA8v0AAPMLAADzUAAA81gAAPNfAADzaQAA83MAAPN6AADzgQAA84gAAPOPAADzlgAA850AAPOkAADzqwAA8+gAAPQhAAD0SAAA9HUAAPR8AAD0hgAA9J0AAPS8AAD02AAA9PsAAPUjAAD1PAAA9WEAAPV+AAD1lAAA9bQAAPXJAAD12AAA9d8AAPXyAAD2DAAA9h4AAPZGAAD2ZQAA9nIAAPZ5AAD2iAAA9rcAAPbAAAD2xwAA9s4AAPbVAAD23gAA9uUAAPbsAAD28wAA9voAAPcDAAD3DAAA9xMAAPccAAD3JQAA9y4AAPc3AAD3QAAA90cAAPdOAAD3VwAA914AAPdlAAD3bAAA93MAAPd6AAD3gQAA94gAAPePAAD3lgAA950AAPekAAD3qwAA97IAAPe5AAD3wAAA98cAAPfOAAD31QAA99wAAPfjAAD36gAA9/EAAPf4AAD4LAAA+EwAAPhcAAD4aAAA+IYAAPivAAD4vgAA+NkAAPjgAAD4+AAA+SQAAPk7AAD5ZQAA+YIAAPmJAAD5kAAA+ZcAAPmeAAD5tQAA+ckAAPnnAAD57gAA+hIAAPojAAD6NgAA+lMAAPpvAAD6jQAA+p8AAPq2AAD6xwAA+uQAAPsLAAD7IQAA+0EAAPtWAAD7cgAA+5AAAPu/AAD73gAA++UAAPvsAAD78wAA+/oAAPwBAAD8CAAA/A8AAPwWAAD8HQAA/CQAAPwrAAD8MgAA/DkAAPxAAAD8RwAA/E4AAPxVAAD8XgAA/GUAAPxsAAD8cwAA/HoAAPyBAAD8iAAA/I8AAPyWAAD8nQAA/KQAAPyrAAD8sgAA/LkAAPzAAAD8xwAA/M4AAPz1AAD9IwAA/TkAAP1dAAD9ZAAA/YAAAP2HAAD9pwAA/dkAAP38AAD+AwAA/goAAP4RAAD+GAAA/jUAAP5LAAD+bwAA/pUAAP6/AAD+1QAA/u4AAP8SAAD/MgAA/1YAAP9vAAD/jAAA/5MAAP+1AAD/vAAA/9cAAP/9AAEAGAABADoAAQBhAAEAmQABAL0AAQDEAAEAywABANIAAQDZAAEA4gABAOkAAQDyAAEA+QABAQAAAQEHAAEBEAABARkAAQEiAAEBKwABATIAAQE5AAEBQAABAUcAAQFOAAEBVQABAVwAAQFjAAEBagABAXEAAQF4AAEBfwABAYgAAQGPAAEBmAABAZ8AAQGmAAEBrQABAbQAAQG7AAEB2wABAgYAAQIaAAECOgABAkEAAQJIAAECYgABAmkAAQKIAAECtwABAtkAAQLgAAEC5wABAu4AAQL1AAEC/AABAwMAAQMKAAEDEQABAy0AAQNCAAEDYgABA2kAAQOQAAEDpQABA7oAAQPYAAED9wABBBoAAQQxAAEESwABBFIAAQR1AAEEfAABBJYAAQS6AAEE0QABBPAAAQT3AAEFLgABBVEAAQVYAAEFXwABBWYAAQVtAAEFdgABBX8AAQWGAAEFjwABBZYAAQWdAAEFpAABBa0AAQW2AAEFvwABBcgAAQXRAAEF2gABBeMAAQXsAAEF8wABBfoAAQYBAAEGCgABBhEAAQYYAAEGHwABBiYAAQYtAAEGNAABBjsAAQZCAAEGSwABBlIAAQZbAAEGYgABBmkAAQZwAAEGdwABBoAAAQa0AAEG5QABBxkAAQdMAAEHeAABB60AAQfmAAEIIQABCCgAAQgvAAEIZQABCJkAAQjQAAEJBQABCSgAAQlTAAEJWgABCWEAAQmPAAEJlgABCZ0AAQnXAAEJ4AABCecAAQnuAAEKDAABCjIAAQpJAAEKYgABCoMAAQqhAAEKvgABCuMAAQr6AAELEAABCy4AAQtOAAELbQABC5QAAQubAAELogABC6kAAQuwAAELtwABC74AAQvFAAELzAABC9MAAQvaAAEL4QABC+gAAQwBAAEMCAABDA8AAQwWAAEMHQABDCQAAQwrAAEMMgABDDkAAQxAAAEMRwABDFAAAQxZAAEMYgABDGsAAQx0AAEMfQABDIYAAQyPAAEMmAABDKEAAQzDAAENAAABDUMAAQ1rAAENowABDdAAAQ4NAAEOOQABDm0AAQ6eAAEO2QABDxIAAQ9cAAEPkAABD8sAARAAAAEQFgABECoAARBoAAEQlgABEM0AAREMAAERPgABEX8AARHFAAER9AABEhgAARI9AAESXwABEn8AARKnAAES2QABEwkAARM7AAETagABE50AARPJAAET6wABE/IAARQaAAEUIQABFFgAARSMAAEUvgABFPAAARUsAAEVZQABFYsAARWSAAEVmQABFaIAARXfAAEWAwABFiUAARYsAAEWMwABFkoAARZfAAEWdQABFosAARahAAEWuAABFs8AARbmAAEW/QABFxQAARcrAAEXQgABF0kAARdQAAEXVwABF14AARdlAAEXbAABF3MAARd6AAEXgQABF4gAARePAAEXlgABF7cAARfVAAEX8wABGBEAARgvAAEYTgABGGwAARiLAAEYqgABGMoAARjqAAEZCwABGTQAARlbAAEZgQABGagAARnPAAEZ9gABGh0AARpEAAEabAABGpQAARq8AAEa5QABGuUAARr7AAEbEQABGycAARsuAAEbNQABGzwAARtbAAEbeQABG5cAARu+AAEb5AABHAoAARwoAAEcRgABHFsAARx4AAEclQABHK0AARzFAAEc6AABHQsAAR0yAAEdYQABHZIAAR3HAAEd9AABHikAAR4/AAEebAABHp4AAR63AAEe1QABHvoAAR8dAAEfRgABH20AAR90AAEfewABH4IAAR+JAAEfkAABH5cAAR+eAAEfpQABH8EAAR/gAAEf/AABIAMAASAiAAEgPgABIEUAASBhAAEgegABIIEAASCnAAEg2QABIRAAASEpAAEhPwABIUYAASFcAAEhcwABIYUAASGeAAEhxwABIfQAASIEAAEiLwABImkAASKBAAEinwABIqYAASKtAAEitAABIrsAASLEAAEizQABItYAASLdAAEi5AABIusAASLyAAEi/QABIxIAASMgAAEjOwABI2wAASN3AAEjkAABI6kAASO+AAEjygABI9cAAS
PeAAEj7gABI/sAASQXAAEkJwABJEEAASRqAAEkgAABJJ8AASTJAAEk0wABJQoAASU0AAElUQABJWsAASV2AAElgAABJYsAASW1AAElwwABJdAAASXeAAEl6gABJfEAASYUAAEmHwABJkIAASZeAAEmYwABJmoAASZxAAEmgQABJpEAASaqAAEmwwABJu0AASb+AAEnHgABJyUAASc9AAEnPQABJ0gAAIAwQAABAoFtgADAAcAH0ANBAMCBQMFCAkEAwcAAwA/Mi8zERIBOTkRMxEzMTATIREhNyERIcEDSfy3aAJ5/YcFtvpKaATmAAIAdf/lAdMFtgADAA8AQ0AnAgQDCgQKEBEQASABAqABsAHQAQMPAR8BAgkDAQECDQ0HfVkNEwIDAD8/KxESADkYL19eXV1xERIBOTkRMxEzMTABIwMhATQ2MzIWFRQGIyImAaD0MwFa/qJaVlNbXFJUXAHlA9H62VRWWFJPW1kAAgCFA6YDQgW2AAMABwAfQA0AAwQHAwcICQYCBwMDAD8zzTIREgE5OREzETMxMAEDIwMhAyMDAZwpxSkCvSnFKQW2/fACEP3wAhAAAAIALQAABP4FtgAbAB8AhEBKGBkUBwYVChAICxwOHw8VEgEdGRYDBBsAAAQWHh0HEg8OCwwLICEAHxAQGRU/EU8RnxEDERETBAgMDAEcMA1ADQINDQoXEwMGChIAPzM/MxI5L10zMzMRMzMROS9dMzMzETMzERIBFzkRMxEzETMRMxEzETMRMxEzMzMRMxEzETMxMAEHIRUhAyMTIwMjEyM1ITcjNSETMwMzEzMDMxUFMzcjA+cvAQL+103cTsJM10ruARUv/AEhTdtNxk7XTvD9HcQvxANM6M7+agGW/moBls7o0QGZ/mcBmf5n0ejoAAMAWP+JBEQGEgAgACYALAB7QEYZAAgnER0lFwMEBAwqFAMFACEhBREDLS4kDQ0dKioGFyUMBgx0WQUDAAYQBgINAwYrHBccc1kVFCAXUBcCgBewFwI/FwEXAC9dXXEzMysRADMYL19eXTMzKxEAMxESOREzMxEzERIBFzkRMxEXMzMRFzMRMzMRMzEwARQGBxUjNSYnERYWFxEnJiY1NDY3NTMVFhcHJicRHgIFNCYnFTYBFBYXNQYERObPifS4V/VgQ8al48uJ5blenKTDpU3+00REiP5uPUSBAcmfwRPNyQVRAQgrQgYBNhpOt4eRuxSZlQpS6kAO/tlLboRnKjof+RcCviw5HusTAAAFAD//7gb2BcsACQAUABgAIgAtAE1AKgAQCgUZKSMeHhUpBRcQBi4vICswK0ArAgMNKw0rDSYSGAMXEgcSBBwmEwA/Mz8zPz8REjk5Ly8RM10RMxESARc5ETMRMxEzETMxMAEUFjMyNTQjIgYFFAYjIiY1ECEyFiUBIwETFBYzMjU0IyIGBRQGIyImNRAhMhYBOy0yYGAyLQG7sqyltAFZqbUCsPzV8AMrhS0yYGAyLQG7sqyltAFZqbUEAH99/Pp7febn7eABye3Y+koFtvwCf338+nt95eft3wHJ7QAAAAMAUv/sBgAFywAdACYAMQB9QEIAMyMnHgctDS8KFiQTJx0XAhoCJyQKDQcHMjMCAhAZGQEQHSMvFgoDJBACHQQjFyEZGQQQARIQKmxZEAQEIWlZBBMAPysAGD8rABg/ERI5LxI5ORI5ORESFzkRMxESOS8ROS8REgEXOREzMxEzETMRMxEzETMRMxEzMTAhIScGIyIkNTQ2NyYmNTQ2MzIWFRQGBwE2NyEGAgclFBYzMjcBBgYBNCYjIgYVFBc2NgYA/odzv/H0/uJ5k0tE6cO634qaARxHNAE+JH5Q/MCBZX5l/rQ6QwFnSDlDTV9WXHGF4L+JwVRWnV2Yuq2Rd8VZ/ut1uIf+/2ODVmY9AUosYAKGNT1AO1hqMF0AAQCFA6YBnAW2AAMAFLcAAwMEBQIDAwA/zRESATkRMzEwAQMjAwGcKcUpBbb98AIQAAAAAAEAUv68AnkFtgANABxADAcABAoACg4PAwMLJAA/PxESATk5ETMRMzEwExASNzMGAhUUEhcjJgJSm5L6jZCTiPiTmgIxAQkBzq7B/jL09f43uaoBxgAAAAABAD3+vAJkBbYADQAcQAwKBAAHBAcODwoDBCQAPz8REgE5OREzETMxMAEQAgcjNhI1NAInMxYSAmSbkviHlJCN+pOaAjH++f46qLgByfb0Ac7Br/4xAAAAAQA/AlYEHQYUAA4AMEAaAA4BDQQHAwUHDQ4KCQsIDxACCwMDDAwIDgAAP8Q5LxczERIBFzkRMxEzETMxMAEDJRcFEwcDAycTJTcFAwKwKQF1If6s3+Ociezd/q4nAW0pBhT+kGj8GP7XeQE5/sl3ASka+mgBcAAAAQBYAOMEOQTFAAsASkAuBgoKAwsICwEDDA0LCQEBBAa4AgFlAgFKAtoCAjkCAQ8CjwICLwJvAp8C7wIEAgAvXXFdXV1dMzMzETMzERIBFzkRMzMRMzEwASE1IREzESEVIREjAdv+fQGD2wGD/n3bAmTbAYb+etv+fwAAAAEAP/74AcsA7gAGACRAFAMABwjAA9ADAgPAzwUBBUAJDEgFAC8rXRrOXRESATk5MTAlBgMjEjchAcs0fNxBJAEY18r+6wEK7AAAAAABAD0BqAJWAqIAAwAiQBQAAwQFAA8BAU8BfwGvAc8B3wEFAQAvXXEzERIBOTkxMBM1IRU9AhkBqPr6AAABAHX/5QHTATkACwAWQAoABgwNCQN9WQkTAD8rERIBOTkxMDc0NjMyFhUUBiMiJnVaVlNbXFJUXI9UVlhST1tZAAAAAAEADgAAA0QFtgADABO3AgAEBQMDAhIAPz8REgE5OTEwAQEhAQNE/d/+6wIhBbb6SgW2AAACAEr/7ARIBc0ACwAXAChAFAwGABIGEhgZCRVzWQkHAw9zWQMZAD8rABg/KxESATk5ETMRMzEwARACISIAERASITIAARAWMzI2ERAmIyIGBEj7/vv9/v/6AQT9AQP9NV1ubGBha21eAtv+gf6QAXwBcwGDAW/+gP6O/vPp7AEKAQ3r6wABAHkAAANOBbYACgAmQBEAAgQBCAELDAQJBwcBCQYBGAA/PxI5LxE5ERIBOTkRMzMzMTAhIRE3NwYHBycBMwNO/ssDBU0eqJUB1/4DTouYTRiHugF3AAAAAQBOAAAEUAXLAB0ANkAbHAcAFg4BFgcBBx4fEgp2WRIHAhwBARx2WQEYAD8rERIAORg/KxESATk5ETMRMxEzETMxMCEhNQE+AjU0JiMiBgcnPgIzMhYWFRQGBgcHFSEEUPwCAW+jZCxhUVWgV6hsjqhoidJ0R5W8vAJ91wFzp4FuO1hWTkjHXEwpZLR0ZbG6rLEOAAEATv/sBEIFywAmAIBATwMEBBwiDAAcBxMTHBcMBCcoAxcYGBd0WUUYAcUYAToYASoYARgiDUmZGKkYAhgYAQgYAQoPGI8Y7xj/GAQWAxgYCiQkHnNZJAcKEHNZChkAPysAGD8rERIAORgvX15dXl
1xXStdcV1xKxESADkREgEXOREzETMRMxEzETMxMAEUBgcVFhYVFAQhIicRFhYzMjY1NCYjIzUzMjY1NCMiBgcnNiEyBAQXppaxtv7O/uTuuFXMZJmSqLhvcaqd0EiVW4/IARXjAQcEb4nAJAYWq5HT608BBys2aHNnVu1ZbKYwO9WQuAACACMAAARxBbYACgATAEZAIwACEwUJAgILDwMFAxQVDwMHAQUTBXRZBglPEwETEwMHBgMYAD8/EjkvXTMzKxEAMxESORESATk5ETMzMxEzETMRMzEwASMRIREhNQEhETMhNTQ2NyMGBwEEcbD+0v2QAoEBHbD+IgoDCCU0/vQBL/7RAS/XA7D8afg+7BNSTv5rAAAAAQBk/+wENQW2ABsAPUAfFwMIGRQDDhQOHB0AEHNZAAAGFRUYdlkVBgYMc1kGGQA/KwAYPysREgA5GC8rERIBOTkRMxEzMxEzMTABMhYVFAAhIicRFhYzIDU0ISIGBycTIREhAzc2AmbU+/7S/uf0lk/SXgEb/ts1gCh7NwMZ/fYbIz0Dpu7P9f74TwELKjXo3RUMQgLp/vr+4QcOAAAAAAIASP/sBFAFxwAYACQAQkAhBhIMABIcABwlJgwVDw8fdFkPDxUDAwh0WQMHFRlzWRUZAD8rABg/KxESADkYLysREgA5ERIBOTkRMxEzETMxMBMQACEyFxUmIyIGBgczNjMyFhUUACMiJgIFMjY1NCYjIgYVFBZIAW8Bbn1HWVefyWQJDWPaxN7++Oqi8YMCEGNqY2RehX0CbQGyAagP9xRgvK2q9tnq/u+WASC/hXtre3pRd6QAAAABADcAAARQBbYABgAnQBMFAQEAAgMHCAAYBQIDAwJ2WQMGAD8rERIAORg/ERIBFzkRMzEwMwEhESEVAeMCJf0vBBn91wSyAQTC+wwAAwBI/+wESgXJABcAIgAuAExAJwYeEhgYDyYVAywJHh4sFQ8ELzASBiEhKSkMAAAjdVkABwwbdVkMGQA/KwAYPysREgA5ETMSOTkREgEXOREzETMRMxEzETMRMzEwATIEFRQGBxYWFRQEIyIkNTQ2NyYmNTQkAxQWMzI2NTQmJwYTIgYVFBYXNjY1NCYCStIBAXyKpI/+5ubw/u6Fk31uAQQTeGhzcnF/1eJPYU1lYk5kBcm/onCvRVi/crTbzLt9wkpPtGudwvu8VmBjUUN1QmICzFFEPF8yLmA/RVAAAAACAEL/7ARKBccAGQAlAEJAIgUdEwAMDCMTAyYnDRAWECB0WRAQAxYWGnNZFgcDCHRZAxkAPysAGD8rERIAORgvKxESADkREgEXOREzETMzMTABEAAhIic1FjMyNjY3IwYGIyImNTQAMzIWEiUiBhUUFjMyNjU0JgRK/pT+j4JDVFybyGoIDDqYcr/cAQvmovOC/e9gbGJkXoZ9A0b+UP5WDvgVW8OrXkz12usBEZj+38GEfGp8e1B3pAAAAAIAdf/lAdMEcwALABcAKEAUDAASBgAGGBkPFX1ZDxAJA31ZCRMAPysAGD8rERIBOTkRMxEzMTA3NDYzMhYVFAYjIiYRNDYzMhYVFAYjIiZ1WlZTW1xSVFxaVlNbXVFUXI9UVlhST1tZA4tUVlhSUVlYAAAAAgA//vgB0wRzAAYAEgA4QCANAQEHBAMTFAAEEAQCBMAPBgEVAwZACQxIBgoQfVkKEAA/KwAYLytfXl0azl0REgEXOREzMTAlFwYDIxI3AzQ2MzIWFRQGIyImAbwPNHzcQSQvWlZTW11RVFzuF8r+6wEK7ALbVFZYUlFZWAAAAAABAFgAywQ5BQAABgAmQBQFAQQAAQAHCACAA7ADAm8D7wMCAwAvXV0vERIBOTkRMxEzMTAlATUBFQEBBDn8HwPh/VQCrMsBto8B8PD+w/7nAAIAWAGiBDkEAAADAAcAOkAjAAQDBwQHCAlHBQEFIARQBGAEAwRXAAFIAAEAPwFfAX8BAwEAL10zXV3GXTJdERIBOTkRMxEzMTATNSEVATUhFVgD4fwfA+EDJ9nZ/nvb2wAAAAABAFgAywQ5BQAABgAmQBQCBgUBBgEHCAaAA7ADAm8D7wMCAwAvXV0vERIBOTkRMxEzMTATAQE1ARUBWAKs/VQD4fwfAboBGQE98P4Qj/5KAAIABv/lA6AFywAZACUASkAsGQASBwcgABoNBSYnoACwANAAAw8AHwDfAAMJAwAAIw8jHX1ZIxMPCn5ZDwQAPysAGD8rERIAORgvX15dXRESARc5ETMRMzEwATU0Njc2NjU0JiMiByc2MzIWFRQGBwYGFRUBNDYzMhYVFAYjIiYBFFJtaUNgVpbAbd/6zvNkjGAz/tdaVlNbXFJUXAHlSmCOUEteOkFEYtt9xqVuoGRHSjw8/qpUVlhST1tZAAAAAAIAZv9UBscFtgA1AEAAUUArFSkUIi42DhQ8ABoaPA4uBEFCPpAR4BHwEQMRETIECxc4OAsLKx4yAyUrJQA/Mz8zEjkvMxEzETMROS9dMxESARc5ETMRMxEzETMRMzMxMAEUBgYjIiYnIwYGIyImNTQAMzIWFwMUMzI2NTQmJiMiBAIVEAAhMiQ3FQYhIAARNBIkITIEEgEUMzI2NzcmIyIGBsdcqG9KcxgQMYxWscwBCdtR1EMXTEBMhfOdyf7UngEnARhwAQBz1P75/oT+V9sBkgEC3AFavPwArFlfCg0uRX2LAt2Q8IZHOjxF1bjRAQQfGP4XjLuXofeDqP7IzP7r/tkyK8FaAZABZPcBlOO0/rT+qtN/kN0KnAAAAgAAAAAFhQW8AAcADQAsQBUHBA0IDg8LBAUNAmlZDQ0FAAQSBQMAPz8zEjkvKxESADkREgE5OTIzMTAhAyEDIQEhAQECJicGAwQ3av3rav6yAgQBewIG/f6TJQghnAFc/qQFvPpEAmAB2XwkgP4HAAMAuAAABPQFtgAPABgAIACKQFMHCAgeEBoaDwQUCx4eFA8DISIHGRAQGWtZgBCQEAJGEAHWEAEQJBtJECQUSUwQAQOsEAEEOhABGRABGRABAw8QAQkGEBAPAAAYaVkAAw8aaVkPEgA/KwAYPysREgA5GC9fXl1fXXFdX11fXSsrXXFxKxESADkREgEXOREzETMRMxEzETMRMzEwEyEgBBUUBgcVFhYVFAQjIQEzMjY1NCYjIxERMzI2NTQhuAHHATcBGXtmi3v+3/j93QE2tH5xe4WjyoB6/vwFtrHBg6gRCh+qjcjgA3NOWlRJ/cX+g2JltgABAHf/7ATRBcsAFgAmQBQDDRMIDQMXGBEAaVkRBAoFaVkKEwA/KwAYPysREgEXOREzMTABIgIVECEyNxEGIyAAETQSJDMyFwcmJgMlr8ABb5rbtN7+wf6upgE30dXXZFKmBMn++ev+F03+/EsBgwFq5AFXt2f8JzoAAAAAAgC4AAAFdQW2AAgADwAoQBQNBAAJBAkQEQUMaVkFAwQNaVkEEgA/KwAYPysREgE5OREzETMxMAEQACEhESEgAAEQISMRMyAFdf5l/nz+YgHLAWYBjP6+/mClhQHAA
un+l/6ABbb+hv6lAdf8SAAAAAEAuAAABAIFtgALAHJARgYKCgEEAAAIAQMMDQYJaVlGBgHWBgESBgEDIQYBsQYBBEwGAaMGAQYeDEkZBgEDDwaPBgIJBgYGAQICBWlZAgMBCmlZARIAPysAGD8rERIAORgvX15dX10rXV1fXXFfcV1xKxESARc5ETMRMxEzMTAhIREhFSERIRUhESEEAvy2A0r97AHv/hECFAW2/v6//v6HAAAAAQC4AAAD/gW2AAkAS0AsBgAAAQQIAQMKCwYJaVnIBgFZBgEMBgENBh4MSQ8GAQ8DBgYCARICBWlZAgMAPysAGD8SOS9fXl0rXl1dXSsREgEXOREzETMxMCEhESEVIREhFSEB6f7PA0b96wHw/hAFtv7+h/0AAAAAAQB3/+wFJwXLABoAR0AlDQIaGBMIAhgIGBscABppWQ0AAQsEAAAFCwsQaVkLBAUWaVkFEwA/KwAYPysREgA5GC9fXl0rERIBOTkRMxEzETMRMzEwASERBgYjIAAREAAhMhcHJiMiAhUUFjMyNxEhAuMCRI35gv61/qMBlQFn4dFnoK3J8sO6YWT+6wM1/QouJQGFAWwBYgGMWvhQ/vLk7vsUATEAAAAAAQC4AAAFZgW2AAsAaEA/CAQEBQAJAQUBDA0ACAEMBggDaVlGCAHWCAESCAEDIQgBsQgBBKMIAUwIATsIARkIAQgIiAgCCAgFCgYDAQUSAD8zPzMSOS9dXV1dXV9dcV9xXXErAF9eXRESATk5ETMzETMRMzEwISERIREhESERIREhBWb+y/29/soBNgJDATUCd/2JBbb9wwI9AAAAAQBCAAAC2wW2AAsAOUAcCAAACgUBAQoDAwwNCAUGBW5ZBgMLAgECblkBEgA/KxEAMxg/KxEAMxESATkRMzMRMxEzETMxMCEhNTcRJzUhFQcRFwLb/WeysgKZsrKwUgOyUrCwUvxOUgAB/2j+UgHuBbYADQAfQA4CCwgIDg8JAwAFaVkAIgA/KwAYPxESATkRMzIxMBMiJxEWMzI2NREhERACH2lOUEJmWAE26v5SFgECFH+HBVr6qP8A/vQAAAAAAQC4AAAFUAW2AAwAOEAbCwAADggEBAUMAgUCDQ4CDAgDAwMFCgYDAQUSAD8zPzMSOREXMxESATk5ETMRMxEzETMRMzEwISEBBxEhESERNwEhAQVQ/qD+gYP+ygE2egGMAVj+AgJoXv32Bbb9Y6wB8f15AAABALgAAAQ/BbYABQAfQA4DAAAFBgcBAwADaVkAEgA/KwAYPxESATk5ETMxMDMRIREhEbgBNgJRBbb7Sv8AAAAAAQC4AAAG0wW2ABQANEAaAgUFBg0SEw8DDgYOFRYJARMDBgsHAwAOBhIAPzMzPzMSFzkREgE5OREXMzMRMxEzMTAhASMSFREhESEBMwEhESERNDYTIwEDI/6gCRP+6wGmAVoGAW8Bpv7fAwwJ/ocEe/6idf1YBbb7ogRe+koCtDGAART7hwAAAAEAuAAABckFtgAPACxAFAMGBgcADQsHCxARAwsHDggDAQcSAD8zPzMSOTkREgE5OREzMxEzETMxMCEhASMSFREhESEBMwI1ESEFyf52/YQJE/7rAYcCewcPARcEUv7bff1QBbb7uQEddgK0AAIAd//sBecFzQALABUAKEAUDAYAEQYRFhcJE2lZCQQDD2lZAxMAPysAGD8rERIBOTkRMxEzMTABEAAhIAAREAAhIAABFBYzIBEQISIGBef+mP6w/rD+mAFpAVEBUQFl+9W6uQFz/o+5vALd/pX+egGGAW0BbQGB/nz+lPX4Ae0B7vkAAAIAuAAABKoFtgAIABMAQEAjAA4ODwkEDwQUFQ0AaVlQDQEPDR8NAgkDDQ0QDxIQCGlZEAMAPysAGD8SOS9fXl1dKxESATk5ETMRMxEzMTABMzI2NTQmIyMFFAQhIxEhESEgBAHuZo+Od3+NArz+2f7whf7KAdMBCgEVAwZxbG1oyuz6/fgFtuUAAAAAAgB3/qQF5wXNAA8AGQBCQCEEAAMVEAoAFQoVGhsABQEJAwUHDRdpWQ0EAwcHE2lZBxMAPysRADMYPysAGBDGX15dERIBOTkRMxEzETMRMzEwARACBwEhASMgABEQACEgAAEUFjMgERAhIgYF57exAWD+c/70F/6w/pgBaQFRAVEBZfvVurkBc/6PubwC3f7+/qNR/ncBSAGGAW0BbQGB/nz+lPX4Ae0B7vkAAAIAuAAABUgFtgAIABYAS0AnFBgTBAAKCgsQBAsEFxgTCQAJaVkAABAAAhADAAAMFQsSDAhpWQwDAD8rABg/MxI5L19eXSsRADMREgE5OREzETMRMxEzETMxMAEzMjY1NCYjIxERIREhIAQVFAYHASEBAe5kk4yPll7+ygGqASoBHo6CAa7+qP6jAy1iaWhY/Xn9zwW22d2ByTn9gwIxAAABAF7/7AQXBcsAJwA6QB4aAAUhFAAMFAwoKSEAFAwEAxcXHmlZFwQDCWlZAxMAPysAGD8rERIAFzkREgE5OREzETMzETMxMAEUBCMiJxEWFjMyNjU0JiYnLgI1NCQzMhYXByYmIyIGFRQWFhcWFgQX/uP+6rSUzVVmbTBdj4aGUAEH6HLPcWR1mUpYXiZTm82YAZbG5FgBIEI2Tk0rQz5EP3SaZ8LeNjHxMCZSQik9OUpixQABACkAAAR5BbYABwAlQBIAAQYBAwMICQESBwMEA2lZBAMAPysRADMYPxESARc5ETMxMCEhESERIREhAuz+yv5zBFD+cwS0AQL+/gAAAQCu/+wFXgW2ABIAJUARCwgBEQgRExQSCQMFDmlZBRMAPysAGD8zERIBOTkRMxEzMTABERQGBCMgADURIREUFjMyNjURBV6R/u67/ub+yAE1iJ2YiQW2/E6i9IIBIfsDrvyBqZ6fqgN9AAAAAQAAAAAFMwW2AAsAGkALAQ0EDAkDAAQDAxIAPz8zEjkRATMRMzEwASEBIQEhARYWFzY3A/oBOf4P/q7+EAE5ARMXMQYLQAW2+koFtvyaTc0oXOYAAQAAAAAHvAW2AB0AIkAQHR8LHgUPGAMKHBMLAwEKEgA/Mz8zMxIXOREBMxEzMTAhIQMmAicGBgcDIQEhExYXNjY3EyETFhYXNjY3EyEGSP6fxgs1BAYwDcX+oP6LATG7MRYGKxPVASXVDioLCiwSugExAwApAQEsNu8z/QIFtvzi3aI570IDM/zNN+JRTulIAx4AAAEAAAAABVYFtgALADRAGQoAAA0GBAgFCwICBQQDDA0IAgQJBgMBBBIAPzM/MxI5ORESARc5ETMRMxEzETMRMzEwISEBASEBASEBASEBBVb+nv6s/qz+tAHl/joBVgE7ATUBTv41Ain91wLyAsT98gIO/SsAAQAAAAAE/gW2AAgAIkAPAgoHBAUFCQoABQEHAwUSAD8/MxI5ERIBOREzMhEzMTABASEBESERASECfwExAU7+G/7M/hsBUANcAlr8g/3HAi8DhwAAAQAxAAAEcQW2AAkAOEAdBAEHAAADCAEE
CgsHBAUFBGlZBQMCCAEBCGlZARIAPysREgA5GD8rERIAORESARc5ETMRMzEwISE1ASERIRUBIQRx+8ACvf1WBBr9RALPyQPtAQDI/BIAAAABAI/+vAJzBbYABwAiQA8EAAAGBgEBCAkFAgMGASQAPzM/MxESATkRMxEzETMxMAEhESEVIxEzAnP+HAHk4OD+vAb60/qsAAABAAwAAANCBbYAAwATtwMBBAUDAwISAD8/ERIBOTkxMAEBIQEBIQIh/uv93wW2+koFtgAAAQAz/rwCFwW2AAcAIEAOAwcHBgEBCAkDBAMAByQAPzM/MxESATkRMzMRMzEwFzMRIzUhESEz398B5P4ccQVU0/kGAAABAC8CCARkBb4ABgAXQAkAAwcIBQQAAQMAP80yORESATk5MTATATMBIwEBLwG2kAHv7/6+/ugCCAO2/EoCg/19AAAAAAH//P68A07/SAADABG1AAUBBAECAC8zEQEzETMxMAEhNSEDTvyuA1L+vIwAAQFMBNkDjQYhAAgAGkAMAwgJCgSADwBfAAIAAC9dGs0REgE5OTEwASYmJzUhFhcVAsM/9EQBVj+sBNksxUIVZcgbAAACAFb/7AQ7BHUAGAAiAFdAMBIdCBgMIggiIyQCABQMGWdZTwxfDAIDHwwBDAwUABUPDwEMBhQPYFkUEAUfX1kFFgA/KwAYPysAX15dGD8SOS9dX10rERIAORESATk5ETMzETMzMTAhJyMGBiMiJjU0Njc3NTQjIgcnNjMyFhURAQcGBhUUMzI2NQNmOwhNo4Ohufn7wq6GtWXB6+Hw/tF2hYKUan+YYUu4qrKpCQYxqlHOZcTI/RcCBgQEWFqBemUAAAAAAgCg/+wEtAYUABIAHwA4QBwQCwMdCx0gIRAJBgAMAAsVABNdWQAQBhpdWQYWAD8rABg/KwAYPz8REjk5ERIBOTkRMxEzMTABMhIREAIjIicjByMRIREUBzM2FyIGBxUUFjMyNjU0JgMOxuDnx8VwFTPpATEMDGtwcWgCa3Reb3AEc/7L/vP+6/7Qj3sGFP6WRZim9IugIbScraWlpQAAAAEAXP/sA90EcwAVACZAFA0CBxMCAxYXBQtdWQUQAA9dWQAWAD8rABg/KxESARc5ETMxMAUgERAAITIXByYmIyIREDMyNjcRBgYCZv32ARwBCcKaWkh8Pu7uWJZLSpcUAj0BHQEtTOwdJf6u/rgvMv77LyQAAgBc/+wEcQYUABIAHwA8QB4dAw4LCRYDFiAhCREABgwADxUGGl1ZBhAAE11ZABYAPysAGD8rABg/PxESOTkREgE5OREzMzMRMzEwBSICERASMzIXMyY1ESERIycjBicyNjc1NCYjIgYVFBYCAsXh5cnTbwoXATLqOw1oanVtBW99ZnFyFAEyAQ8BEwEzpH1iAWb57JGl84ijIbScraWlpQAAAAACAFz/7ARiBHMABgAbAGBANhkRAxISChEECgQcHQ8SAQ0FAxJmWeUDAakDAUwDXAMCAwMDBw0PAAEMBg0AX1kNEAcVYFkHFgA/KwAYPysAX15dERI5GC9fXV1dKwBfXl0REgE5OREzETMRMxEzMTABIgYHISYmAyAAERAAMzIAFRUhFhYzMjY3FQYGAm9hbggBrAJyNv7y/tABGfjtAQj9LwWQgmW0YlC2A5p7cXF7/FIBKgERARkBM/7y7pSCkiou7CgnAAAAAAEAKQAAA3UGHwAVAEFAIA0XAAIFAxQCAgcDAxYXAxULEF1ZCwEHFAQBFAFgWRQPAD8rEQAzETMYPysAGD8REgE5ETMzETMRMxEzETMxMAEhESERIzU3NTQ2MzIXByYjIgYVFSEDCv74/s+oqLzPnntOXE5BOgEIA3n8hwN5k1JSv7Av4B1NPEYAAAAAAgBc/hQEcQRzAAsAJgBGQCURCR4MJAMDGBYeBCcoIxkbISUPIQddWSEQGwBeWRsWDxRfWQ8bAD8rABg/KwAYPysAGD8REjk5ERIBFzkRMzMRMzMxMCUyNjU1NCYjIhEUFgUUBCEiJzUWMzI1NTcjBiMiAhEQEjMyFzM3IQJve2pve9drAnP+5/7q9a3L6esJCWvSyd3lyc52CBkBAtuNniWznf6uqKbd8flC9Fb+FomlATYBCwETATOkjwABAKAAAASoBhQAFQA0QBkPDAgICQABCQEWFw8JEgoAAQkVEgRdWRIQAD8rABg/Mz8REjkREgE5OREzETMRMzMxMCEhETQjIgYVESERIREUBwczNjMyFhUEqP7PtIBy/s8BMQcHEGbexcwCjfKuw/3yBhT+wyWJWqTUxgAAAgCTAAAB3wYUAAgADAAzQBsECQkACgoNDhkHAQMPBwEKBgIHY1kCAAsPChUAPz8/KwBfXl1fXRESATkRMzMRMzEwEzQzMhUUBiMiASERIZOmplNTpgE+/s8BMQV/lZVHT/sXBF4AAAAC/33+FAHfBhQADQAWADxAIAISCwsOCAgXGBkVAQMPFQEKBhAVY1kQAAkPAAVdWQAbAD8rABg/PysAX15dX10REgE5ETMzETMyMTATIic1FjMyNjURIREUBgM0MzIVFAYjIkZ1VEZJTUcBMc5wpqZTU6b+FBnwE1ZUBKr7KbLBB2uVlUdPAAAAAQCgAAAE9gYUAA4AN0AbAwUFEA4KBAcKBw8QBwQAAwgICgILAAIPBgoVAD8zPz8REjkRFzMREgE5OREzETMRMxEzMTABNwEhAQEhAQcRIREhEQcBxYUBOQFY/kQB1/6g/r6D/s8BMRACYKoBVP4b/YcBxWn+pAYU/Ur+AAEAoAAAAdEGFAADABZACQABAQQFAgABFQA/PxESATkRMzEwISERIQHR/s8BMQYUAAEAoAAAB0IEcwAjAD5AHw0KAAEbHBwBCgMkJRMNChELDxwBChUgBREFXVkXERAAPzMrEQAzGD8zMz8REjk5ERIBFzkRMxEzETMxMCEhETQmIyIGFREhETMXMzY2MzIXMzY2MzIWFREhETQmIyIGFQSJ/s9RV3Vq/s/pKREtqm77WRstr26+w/7OUVdwbwKNeXmsxf3yBF6PTVekTlbD1/0nAo15eaCuAAEAoAAABKgEcwAUAC5AFg0KAAEKARUWDQoRCw8BChURBV1ZERAAPysAGD8zPxESORESATk5ETMRMzEwISERNCYjIgYVESERMxczNjYzMhYVBKj+z1ZegHL+z+kpETOzcsPKAo15eavG/fIEXo9RU9PHAAAAAgBc/+wEmARzAAsAGQAoQBQAEwwGEwYaGxYJXVkWEA8DXVkPFgA/KwAYPysREgE5OREzETMxMAEUFjMyNjU0JiMiBgUQACEiJgI1EAAhMhYSAZNte3prbHt6bAMF/uD+/6H2hAEeAQOh9oQCMaaqqaempqWn/u/+zI0BCLABEgEwjP76AAAAAAIAoP4UBLQEcwATAB8APEAeCgMDBxAdBx0gIQMKAA0IDwcbDRRdWQ0QABtdWQAWAD8rABg/KwAYPz8REjk5ERIBOTkRMxEzETMxMAUiJyMWFREhETMXMzYzMhIRFAI
GAyIGBxUUFjMyETQmAwbFcBAQ/s/4Kw5r0sbgacLdcWgCa3TNZRSPjBb+OwZKkab+zv7ws/74igOTi6AhtJwBUqWlAAAAAgBc/hQEcQRzAAsAIABEQCMJDxkWAx0dGg8DISIWFhcdHh4MFw8aGxIHXVkSEAwAXlkMFgA/KwAYPysAGD8/EjkvMxE5LxESARc5ETMzMxEzMTAlMjY3NTQmIyIRFBYXIgIREBIzMhYXMzchESERNDcjBgYCb3RsBW9712sExuDlx2qePAgbAQL+zg0NMaLbhaYltJz+rqim7wExARABEgE0UFSP+bYB1T1rUVQAAAABAKAAAAN3BHMAEAAnQBMNCgoCERIOCgAABWRZABALDwoVAD8/PysREgA5ERIBOTkRMzEwATIXAyYjIgYVESERMxczNjYDED4pFyU1kqP+z+ctDzSxBHMJ/uIKlof9xwRevF5zAAAAAAEAXP/sA6wEcwAlADpAHhkABh8UAAwUDCYnFB8ADAQDFxcdX1kXEAMKX1kDFgA/KwAYPysREgAXORESATk5ETMRMzMRMzEwARQGIyImJzUWFjMyNTQmJicuAjU0NjMyFwcmJiMiFRQWFx4CA6zv7nqsS1XVUaYsbFqBeTfn1Mq/XFSSTIdXk4N6OgFMrLQhIPwoNmAkLTkmNlx3V5WjWNwkLkkpPDs1XHgAAAABAC//7AM3BUwAFQA+QB4KCA8TEwgRAwgDFhcJEgwOD0APEmBZDw8FAF1ZBRYAPysAGD8rABoYEM0zETMREgE5OREzETMRMxEzMTAlMjcVBiMiJjURIzU3NzMVIRUhERQWAndQcHKmt6eSqFjDATn+x0nfI+MzubkCG4Fm7O7l/eVBPgABAJr/7ASiBF4AFAAuQBcLCBQBAREIAxUWAgUSCQ8AFQUOXVkFFgA/KwAYPz8zEjkREgEXOREzETMxMCEnIwYGIyImNREhERQWMzI2NREhEQO4KRAxtHPFyAExVl6AcgExj05V08YC2f1zeXmrxgIO+6IAAAEAAAAABI0EXgALABhACgoNAQwFCQEPABUAPz8zOREBMxEzMTAhASETFhczNjcTIQEBqv5WAT/YIQoIBifXAT/+VgRe/YNofXF0An37ogAAAQAUAAAGxQReABsAIkAQBhocHQIKFAMFGQYPDwAFFQA/Mz8zMxIXORESATk5MTAhAwMjAyEBIRMWEzM2NzcTIRMWFhczNjY3EyEBBDdWcgnM/rj+wgEwgRYnCAQfEIoBUIMMIwIICygOhgEr/r4BhwHu/IsEXv4RWP7pTKVVAhj96DfVOlnjMwHv+6IAAAAAAQAKAAAElgReAAsAL0AXAQsGAwkFBwcJAAsEDA0JAwsEAQ8ICxUAPzM/MxI5ORESARc5ETMRMzMRMzEwAQEhExMhAQEhAwMhAYX+mAFa2dsBWv6UAX3+pevs/qYCOwIj/pwBZP3d/cUBf/6BAAAAAAEAAP4UBI0EXgAWAClAEwkADxYXGAQWFg0IAA8NEl1ZDRsAPysAGD8zEjkRMxESATk5MjMxMBEhExYXMzY3EyEBBgYjIic1FjMyNjc3AU7TGwoGCyDPAUf+J0HxoU9MN0FReSISBF79i1JwZ1sCdfsTr64R8g1jZDcAAAABADcAAAOqBF4ACQA1QBwHAAADCAQBBQoLBwQFBQReWQUPAggBAQheWQEVAD8rERIAORg/KxESADkREgEXOREzMTAhITUBITUhFQEhA6r8jQIG/hkDQv4IAgq0AsHpxv1RAAAAAQAf/rwC1QW2AB8AVEAvDRsbFxMUFAMRFxcHHwMfICETAwMPBG8EnwQDbgQBSwTLBAI6BAEEBBsNDAMaGyQAPzM/MxI5L11dXXEzEjkREgE5OREzMxEzETMRMxEzETMxMAE0JiM1MjY1ETQ2NjMVBgYVERQHFRYVERQWFxUiJiY1AR+DfX2DUrupY0vq6kxipr1TAQ5hUu9RYQE+b3I14QJDSP7VuyMMI7r+1UlCAuI0cnEAAQHH/i8CogYOAAMAFkAJAgMDBAUAAAMjAD8/ERIBOREzMTABMxEjAcfb2wYO+CEAAQBS/rwDCAW2AB8AUkAuEgQECBgAAA4IHAwLCAsgIQwcHA8bbxufGwNuGwFLG8sbAjobARsbBBITAwUEJAA/Mz8zEjkvXV1dcTMSORESATk5ETMzETMzETMRMxEzMTAFFAYGIzU2NjURNDc1JjURNCYnNTIWFhURFBYzFSIGFQIIUr2nY0vp6Upkpr1Tg319gy1wcjXiAkVGASu6IwwjuwErRkQD4TRycP7CYVHvUmEAAAAAAQBYAicEOQN9ABUATkAQAw8WFw4GAwsAEbARwBEDEbj/wEAjDA9IEREAHwY/Bm8GAw8GHwY/Bl8GfwaPBq8GzwbvBv8GCgYAL11xMzMvK10zxhDGERIBOTkxMAEiBgc1NjMyFhcWMzI2NxUGIyImJyYBQjd9NmeZSYFLgWI1fjZlm0J4WoMCoEM2520gIDdAOedtGiU4AAACAHX+jwHTBF4AAwAPAERAKAoDBAIDAhARDwEfAS8BA18BAQFAExZIAAEQAQIJAwEBDQMNB31ZDQ8APysAGC8SOS9fXl0rXXEREgE5OREzETMxMBMzEyEBFAYjIiY1NDYzMhao9DP+pgFeWlZTW11RVFwCXvwxBSVUVlhSUVlYAAEAj//sBBAFywAbAElAKRECCBoaBRsKFxsCBBwdCA5zWQUACAETAwhABgcZE3NZAIAZkBkCGRsZAD/NXTIrABg/Gs1fXl0yKxESARc5ETMzETMRMzEwJSQREBI3NTMVFhcHJiYjIgYVEDMyNjcVBgcVIwIz/lzR07KmhVpIfD55dO1ShGR/irKwOwH6AQUBHB+mnglB6x0kp6v+uR8t/j0JvAAAAQBSAAAEagXLAB0AUEAqGA8SCQ0NGhYRAgsWEgUeHxMSDwwYGRh4WQkZGRIAAAVzWQAHEg92WRIYAD8rABg/KxESADkYLzMrEQAzERI5ERIBFzkRMzMRMxEzMzEwATIXByYjIgYVFSEVIRUUByERITU2NjU1IzUzNTQ2ArzDw12dc05UAXf+iZcCzvvoZ02ysuUFy1LmQFlTwduPqk7+/PgscmSR28PJ2QAAAAIAcQD+BCEEqgAbACcAWEA0AhoaAAUXFwMZHAAOIgwQEAkTEyIAGQQoKQkMEBMFAhoXCB9QFQEVJR8HbwePB68HzwcFBwAvXTPEXTIXORESARc5ETMzETMRMxEzETMzETMRMxEzMTATNDcnNxc2MzIXNxcHFhUUBxcHJwYjIicHJzcmNxQWMzI2NTQmIyIGvDaBk39bamlbf5aBNTV9kn9fZXNUfZF/Ns9tUFFvcU9ObwLTZl9/k381N4GPgVlua1x9kX0zM3uRfV1oTW9uTlBucAAAAQAGAAAEiQW2ABYAbkA/BQkJAxIODhQHCwsMAwACAAwQFBUGFxgKDg8OeVkHAA9wDwIJAw8GEhMSeVkDAA8DDxMfE08TAxMTDAEVBgwYAD8/MxI5L10XMysRADMYL19eXTMrEQ
AzERIBFzkRMxEzETMRMxEzETMRMzEwAQEhATMVIxUzFSMVITUjNTM1IzUzASECSAEIATn+gcP29vb+4ff3977+hwE8A1wCWv0Vsoqy3d2yirIC6wAAAAACAcf+LwKiBg4AAwAHAChAEgIGBgMHBwgJAwMHBAQHAAAHIwA/PxE5LxE5LxESATkRMzMRMzEwATMRIxEzESMBx9vb29sGDvzR/n/80QAAAAIAav/sA38GHwAsADcAV0AuCRsYMgItIAAQBC0AGyYWMjImAAQEODkTNRgwAjUYMAQeBwcNbVkHAR4kbVkeEwA/KwAYPysREgAXOREzETMREgEXOREzETMRMxEzETMRMxEzETMxMBM0NyY1NDYzMhcHJiYjIgYVFBYXFhYVFAcWFhUUBiMiJzUWFjMyNTQmJicmJjcUFhc2NTQmJwYGeYWF37aqwVJqezpRSlx5qJV9Pj/vycuSUcZGwiVaULqH3315Tmx+JjQDIZpeVJOBnlS/MiItLy5JL0WkabJQKGlKlK9Pzyk5dScwMyJLmolAZDE5V0VdLQ9LAAAAAAIBFwT4A8UGBAALABcAKEATBgASDAAMGBkPAwMVUAkBgAkBCQAvXXEzMxEzERIBOTkRMxEzMTABNDYzMhYVFAYjIiYlNDYzMhYVFAYjIiYBF0tAQktMQUBLAZNRPEFNTkA8UQV9QUZKPTxJRj9GQUg/PUhBAAAAAAMAZP/sBkQFywAWACYANgBQQDEnFwMPHy8vFAkPFwU3OAAAEhAScBKAEgQSEhsGfwwBDwwfDH8MjwwEDAwjMxsEKyMTAD8zPzMSOS9dcTMROS9dMxESARc5ETMRMxEzMTABIgYVFBYzMjcVBgYjIiY1NDYzMhcHJgE0EiQzMgQSFRQCBCMiJAI3FBIEMzIkEjU0AiQjIgQCA313h3WHX3g8YkHB096+gno8avyTyAFeysgBXsrC/qLQz/6iw22sASusrAEqraz+1ays/tatBB+rmZ2oL4MbF/Db0fg+fTb+vMgBXsrI/qLKxf6m0M8BWsas/tatrAErrKwBKq2s/tUAAAAAAgAvAvACuAXHABcAIQBLQC0QGAcLHhcBAR4HAyIjHwALEAsgCwMLCxMAGwAEMASQBAMEeQ2JDQJoDQENEx8APzNdXdRdMsQSOS9dMxESARc5ETMRMxEzMzEwAScGBiMiJjU0Njc3NCMiByc2NjMyFhURJRQWMzI2NTUHBgIxHyt8SnV9pbljf1GIQkKfY4mV/kQuIE1ZY5EC/G46QHVqbW0JBHU9hyAyjoP+RtUmJFNBJAYKAAAAAAIAUgBeBJoEBAAGAA0ALkAUCQsLCgIEBAMDBgoNBg0ODwwFCAEALzMvMxESATk5ETMRMxEzETMRMxEzMTATARcBAQcBJQEXAQEHAVIBc9v+6QEX2/6NAfoBctz+6QEX3P6OAj0Bx3f+pP6kdwHFGgHHd/6k/qR3AcUAAAABAFgA+AQ5Az8ABQA8QCYAAQMBBgcBAQO4BAFlBAFKBNoEAjkEAQ8EjwQCLwRvBJ8E7wQEBAAvXXFdXV1dMzMvERIBOTkRMzEwJSMRITUhBDnb/PoD4fgBbNsA//8APQGoAlYCogIGABAAAAAEAGT/7AZEBcsACAAWACYANgBlQDoNCQwEJxcAERESCQQfLy8EEhcENzgQAAATDhIPEh8SfxKPEgQIEwATEBNwE4ATBBITEhMjMxsEKyMTAD8zPzMSOTkvL10RM10RMxI5LzMREgEXOREzETMRMxEzETMRMxEzMTABMzI2NTQmIyMFFAYHEyMDIxEjESEyFgE0EiQzMgQSFRQCBCMiJAI3FBIEMzIkEjU0AiQjIgQCAtdmUVlSWmQBrlZK7rDNf5wBB6ib+9/IAV7KyAFeysL+otDP/qLDbawBK6ysASqtrP7VrKz+1q0C/FBBSUGGU3kd/nMBYv6eA3+D/sTIAV7KyP6iysX+ptDPAVrGrP7WrawBK6ysASqtrP7VAAH/+gYUBAYG3QADAC5AHwAFAQQBDwIfAn8CjwIEDwKfAq8C3wLvAgUCQAsQSAIALytdcTMRATMRMzEwASE1IQQG+/QEDAYUyQAAAAACAFwDGQMQBcsADgAaACFADg8ACBUAFRscEgwMGAQHAD8zMy8zERIBOTkRMxEzMTATNDY2MzIWFhUUBgYjIiY3FBYzMjY1NCYjIgZcXKBeXKFdXaBdkcm/WUJCWltBQFsEcVygXlyiXF2hWseRQFpcPj9eXAAAAAACAFgAAAQ5BQIACwAPAEZAJgEMBgoKAwsIDw8LDAMQEQsJAQEEBrgCAWUCAUMC0wICOQIBAg0MAC8zL11dXV0zMzMRMzMREgEXOREzETMzETMRMzEwASE1IREzESEVIREjATUhFQHb/n0Bg9sBg/592/59A+ECotsBhf572/5//t/b2wABAC8CSgK+BcsAFgAuQBUMAQARFQERBgEGFxgCARUJDh8VASAAPzM/MxESORESATk5ETMRMxEzETMxMAEhNTc2NjU0JiMiByc2MzIWFRQGBwchAr79eeBmOTAoUWN7k72Jnl6BaQFgAkqo22RZMiYoWJiBhXVVlnVfAAAAAQA7AjkCtgXJACUAWkA0IAsLFAIDAxQAGQYQEBkUAyYnAhQUexUBzRUBeRWJFQJoFQEPFR8VAhUVCVgcARwjHw4JIQA/Mz8zXRI5L11dXV1xMxI5ERIBFzkRMxEzETMRMxEzETMxMAEUBxUWFhUUBiMiJzUWMzI1NCYjIzUzMjY1NCYjIgYHJzY2MzIWApqqXmiwuo+ClHuPWE5wXFNRMjMvVDllPpdnf6IE4Y83DRRuT3mLRr5aazU1oDQ5JjImKI0vPoAAAAABAUwE2QONBiEACAAaQAwABQkKA4APAF8AAgAAL10azBESATk5MTABNTY3IRUGBgcBTKw/AVY0+0cE2RvIZRU0zTIAAAEAoP4UBKgEXgAYADlAHRIAFRUWCQsLBhYDGRoMEg8HFw8KFRYbDwNdWQ8WAD8rABg/Pz8zEjk5ERIBFzkRMxEzETMzMTABFBYzMjY1ESERIycjBgYjIiYnFxcRIREhAdFYXn5yATHnKw8qeFg+aCAFBf7PATEB0Xl5rcQCDvuillVVLixVnf7ABkoAAQBx/vwEjwYUAA8AJ0ASBAUAAQEFCwMQEQgIDgEFAw4AAD8zLzMSOS8REgEXOREzETMxMAEjESMRIxEGIyImNRA2MyEEj6Gmoj5U2Mva6AJc/vwGUPmwAzMS+vsBBP4AAQB1AikB0wN9AAsAH0ASAAYMDQkDfVkPCT8JfwmvCQQJAC9dKxESATk5MTATNDYzMhYVFAYjIiZ1WlZTW11RVFwC01RWWFJRWVgAAAH/2/4UAaIAAAASADFAGQUQDQALDQsTFA0QEA6ECJQIAnYIAQgDGw4ALz8zXV0SOS8zERIBOTkRMxEzMjEwBRQGIyInNRYWMzI1NCc3MwcWFgGil55ORBtbGUimTsEbSlj6gHIVqAcOPlMZmj0YZQABAFwCSgJIBbYACgAiQA8ABAEIAQsMB
[Unrecoverable embedded binary data omitted: the remainder of this chunk is base64-encoded font glyph tables with no readable content.]
QAVBFPkxMN0UpYEVFYCkDABEQGAEYFFANgA0CDw0BDQQQCwELgAcPAD8AbwADAAAvXTIazXEyL11dM81xMhIXOS8vLxEzMxEzEM1xFzIRMzMRMxDNcRcyETMzETMQzXEXMhESARc5ETMRMxEzETMRMxEzMTABMhYXIyYmIyIGByM2EzIWFyMmJiMiBgcjNjYBMhYXIyYmIyIGByM2NiEyFhcjJiYjIgYHIzY2ATIWFyMmJiMiBgcjNjYhMhYXIyYmIyIGByM2NgEyFhcjJiYjIgYHIzYhMhYXIyYmIyIGByM2NgPpXXEHTwU8RU4yBUsLxVxzBk8FPEVOMgVLBWQCq1xzBlAFPEROMgVMBWX75lxzBlAFPEROMgVMBWUE6FxzBlAFPEROMgVMBWX75lxzBlAFPEROMgVMBWUFp1xzBlAFPEROMwVLC/rUXHMGUAU8RE4yBUwFZQWRZV0sLCkvwvnyZlwsLCkvWWkBF2ZdLSsnMVppZl0tKycxWmkD22ZdLSsnMVppZl0tKycxWmn+GGhaLCwoMMJmXC0rJzFaaAAIACn+fwd9BdMABwAPABcAHwAmAC0ANQA9AGlARAkFDQEVJDoXPSABBSc1GDIrHA4+PyMmKi1PO187rzu/OwQ7NkAzUDOgM7AzBDMuNi0XHyYuBwgIBy4mHxctNggMBQwEAD8vEhc5Ly8vLy8vLy8QzV0QzV0QzRDNERIBFzkRMxEzMTAFFwYGByM2NwMnNjY3MwYHATcWFhcVJicFByYmJzUWFwE0NjcXBgcBFAYHJzY3AyImJic3FhcBFxYWFwcmJwQ3CxFGJGE1ETsLE0kfYTQSAiMOR8hB3YH7aA5Cv0/dgQOmrphF6j/86LuLRb1rKBE4UA9De0wDaBMmWhdDkDcjDkK/T92BBJgOR8hB3IL+FgsTSR9hNRE7CxFGJGE1EQGoF1s4RJgu/JUXXjNEdU8C4FfALkbGY/zpBELCPUbeSwACALj+VgcrB5EAEwAhAEtAJwsNBBMIEA0JDAwUDRoTBSIjEAQAEyEaGh4XBgADExILIg0IalkNEgA/KwAYPz8/M8YyMi8zERI5ORESARc5ETMRMzMRMxEzMTATIREHBzMBIREhAyETIRE0EyMBIQEGBiMiJichFhYzMjY3uAEXBAoGAqMBcwFOsv6ovP7sEgj9Wv6LBHcT9Obt4w4BEQdZc2NlCwW2/T691wRW+1T9TAGqAr6NARX7oAeRu6ukwmdTW18AAgCg/m8GTgY/ABEAIABHQCUJCgMQBg4HCgoSDgsZEAYhIgMOEBEgGRkdFQQRDxAVCwZfWQsVAD8rABg/PzPGMjIvMxESOTkREgEXOREzETMRMxEzMTABERQDASERIQMhEyERNDcBIREBBgYjIiYmJyEWFjMyNjcBxxcCBAFvASuS/t6J/tkU/f7+kgQpFfPmoctnCgEQCVlxZ2QIBF7+Rkb+8AMQ/IH9kAGRAb532fzyBF4B4b2pSpaGbE5fWwAAAgAvAAAEvgW2ABEAGgCHQDYCFg0LAAQSEg8LCBYLFhscAw0PDQENBQ4NaVkADg4EEAQaaVkwBKAEAuIEAQOBBAEEXQQBBQS4/6hAFA1JAwRzBAIMAwQECxADCxJpWQsSAD8rABg/EjkvX15dK19dX11fXXErERIAORgvMysAX15dETMREgE5OREzETMzETMzETMRMzEwASEVIRUzIAQVECEhESM1MzUhETMyNjU0JiMjAe4BK/7VegEeATj9pP5WiYkBNmidkpS0TwUf/pzo1P43BCH+l/tIZWZlWQAAAAACAAQAAASiBhQAEQAZAI9AVQYSEQ8ECBcXAQ8MEg8SGhsHBBEAABFlWQAADwIIFmJZEAigCAJ/CAGPCO8IAv0IAcMIAQNRCGEIAgS6CAFmCAEDCCQNSQ8IAQoGCAgPAgAPF2JZDxUAPysAGD8SOS9fXl0rX11dX3FfXV1xcnIrERIAORgvKxESADk5ERIBOTkRMxEzMxEzMxEzETMxMBMzNSEVIRUhETMgFhUQISERIwE0JiMjETMyBJwBMQF5/ofXAQL4/hL97JwDbWhn0dXLBTXf38b+P6Sm/pwEb/zxQTr++AACALgAAASqBbYADwAbAGBANAQAAxcQCgoLFRIAFxcSEwsEHB0UFRsQAwYMCQkQaVlQCQEPCR8JAgkDCQkMCxIMG2lZDAMAPysAGD8SOS9fXl1dKxESADk5ERI5ORESARc5ETMRMxEzETMRMxEzMTABFAYHFwcnBiMjESERISAEATM3JzcXNjU0JiMjBKpfXViYc1Zyhf7KAdMBCgEV/USRF0yZZSl3f40D7oHJPn1wpBX9+AW25f41Am1ujzVabWgAAAAAAgCg/hQEtARzABcAKABTQC0UEQoDAwcTIREmJiQhIgcFKSojJBgfCgIWEwQADggPBxsOGF1ZDhAWH2BZABYAPysAGD8rABg/PxESFzkREjk5ERIBFzkRMxEzETMRMxEzMTAFIicjFhURIREzFzM2NjMyEhEQBxcHJwYDIgYHFRQWMzM3JzcXNjU0JgMGxXAQEP7P+CsQNqJjxuCRXp5sNJdxaAJrdBESf6hqF2UUj4wW/jsGSpFTU/7O/vD+0aB7dosQA5OLoCG0nAKee4NObKWlAAABAC8AAARQBbYADQA9QB8KCAMHBwwIAQUIAw4PBgoLCmlZAwsLDQgSDQJpWQ0DAD8rABg/EjkvMysRADMREgEXOREzMxEzETMxMAEVIREhFSERIREjNTMRBFD9ngGR/m/+yomJBbb+/pr+/awCVP4CZAAAAAEABAAAA74EXgANAD1AHwoIAwcHDAgBBQgDDg8GCgsKXlkDCwsNCBUNAl1ZDQ8APysAGD8SOS8zKxEAMxESARc5ETMzETMRMzEwARUhFSEVIREhESM1MxEDvv4AAUz+tP7PiYkEXvjZ6/5eAaLrAdEAAAAAAQC4/gAFeQW2AB4AWkA1BxwWCQMDBA8cBBwfIAsAallQC2ALgAvAC9AL8AsGDwsBCwMLCwQFExlqWRMjBBIFCGlZBQMAPysAGD8/KxESADkYL19eXV0rERIBOTkRMxEzETMzETMxMAEiBxEhESEVIRE2MzIEEhUUAgYjIiYnERYzMjY1NCYCbTVK/soDmP2ea5XBATGZi/uZbotKgYWLouYCGQ399AW2/v5vDKr+0c3D/tehFhkBEC/NsMTIAAAAAQCg/goEiQReAB0ASkAnFg0GGBISEwANEw0eHxoPYVkPGgELAxoaFBMVFBddWRQPBAphWQQcAD8rABg/KwAYPxI5L19eXSsREgE5OREzETMRMzMRMzEwJRQCBiMiJxEWFjMyNjUQISIHESERIRUhFTYzMhYSBIl64JOOci15MXR9/sUqLv7PAzH+AEpLnvuKRLP+/oUzAQcYHqOXATEG/o0EXvjxDIz+/AAAAAEAAP5WCBIFtgAVAExAKggNARUGEREDEgwNDQkSABUFFhcACQMTBhAGFQcEAQMSFRINIg8KalkPEgA/KwAYPz8zPzMzEhc5ERIBFzkRMxEzMx
EzETMRMzEwAQEhAREhEQEhAQEhESERIwERIREBIQII/hUBPwHZASEB2QFA/hQBUgE9/tWo/hf+3/4X/rQC+AK+/TwCxP08AsT9Qv4S/UwBqgLl/RsC5f0bAAABAAD+bwdYBF4AFQBQQCsSBQkGCgMODgAPCQoKDxMUBBYXEwAQAw0GBhQSCgwEARQPDxIVDAdfWQwVAD8rABg/Mz8zMxDGERIXORESARc5ETMRMzMRMxEzETMyMTABESERASEBASERIREjAREhEQEhAQEhAvABHAGOATv+ZAEVAQr+7pD+Vv7k/lb+ugHD/mQBOwI/Ah/94QIf/ej+mf2QAZECN/3JAjf9yQJGAhgA//8AXv4UBNcFywImAbEAAAEHA3wBngAAAA65AAH/2bQ1LyEHJQErNf//AE7+FAQjBHMCJgHRAAABBwN8ATEAAAAOuQAB/860NzEdFiUBKzUAAQC4/lYF4wW2AA4APUAgDQMLBwcIAgMDDggDDxALBg4DBQwJAwgSAyIFAGpZBRIAPysAGD8/PzMSFzkREgEXOREzETMRMxEzMTABIREhESMBESERIREBIQEEnAFH/tW4/e7+ygE2AgwBSv3rAQr9TAGqAuX9GwW2/TwCxP1CAAABAKD+bwU1BF4ADgBCQCEBBQIGDgoKCwUGCwYPEAIOCQMLDAYIAAwPCxUIA19ZCBUAPysAGD8/MxDGERIXORESATk5ETMRMxEzETMRMzEwASEBATMRIREjAREhESERA30BUP5FASn6/u6J/jf+zwExBF796P6Z/ZABkQI3/ckEXv3hAAABALgAAAVQBbYAEwBQQCgNDw8VBgICAwsSEggTAxMUFRMREAAHCwkDDAYADgYDAQEDDAQDEAMSAD8zPzMSOREXMxESFzkREjk5ERIBOTkRMzMRMxEzETMRMxEzMTABBxEhESERNzcRMxUBIQEBIQEVIwJqfP7KATZ6AoYBBAFY/gICAv6g/wCGAmRa/fYFtv1jrAIBYroBR/15/NEBnN4AAQCgAAAEywReABIASEAlChQCERESBg0NAw4IDhIDExQPDA4DCwMGBAMHAhACEgcADwsSFQA/Mz8yETk5ERIXORIXORESARc5ETMzETMRMxEzETMxMBMhETcRMxU3IQEBIQMVIxEnESGgARxjkbgBPP5FAeL+uteRYf7kBF794XsBPIHp/ej9ugEKsAFkef3JAAAAAAEABAAABSUFtgAUAFZAKwsNDRYGDxQSBAgREQESDA8SDxUWBxQAFGlZBAAAAg8MCAMQEBIKAgMOEhIAPzM/MxI5ERczETkvMysRADMREgE5OREzETMzETMzETMRMxEzETMxMBMzNSEVMxUjETcBIQEBIQEHESERIwSJATaJiXoBjAFY/gICAv6g/oGD/sqJBS+Hh/7+6KwB8f15/NECaF799gQxAAAAAAEABAAABPYGFAAWAFFAKQ0PDxgGDhYJFA4RFBEXGAcWABZlWQQAAAwCEQ4KAxISFAwCAAwPEBQVAD8zPz8REjkRFzMREjkvMysRADMREgE5OREzETMzETMRMxEzMTATMzUhFSEVIREHMzcBIQEBIQEHESERIwScATEBO/7FEASFATkBWP5EAdf+oP6+g/7PnAVzoaHH/rL+qgFU/hv9hwHFaf6kBKwAAAEAAAAABd0FtgAOAERAIgUHBxAOAgsLDAYJDAkPEAkGAgMKCgwABAMIDBIADmlZAAMAPysAGD8zPxESOREXMxESATk5ETMRMxEzMhEzETMxMBEhETcBIQEBIQEHESERIQJ7ewGLAVj+AgIC/qD+gYP+y/66Bbb9Y6wB8f15/NECaF799gS0AAABAAAAAAWPBF4ADAA+QCAGDgwCCQkKBAUKAw0OBQICCAUDCgADDwcKFQAMYFkADwA/KwAYPzM/ERIXOREzERIBFzkRMxEzMhEzMTARIREBIQEBIQERIREhAoEBrAE7/kYB4f67/jf+4/6cBF794QIf/ej9ugI3/ckDeQAAAQC4/lYGkQW2AA8Af0BNDAgICQANBQIDAwUJAxARAAwBDAYMB2lZRgwB1gwBEgwBAyEMAbEMAQSjDAFMDAE7DAEZDAEDDwyPDAIJBgwMBQ4KAwkSAyIFAGpZBRIAPysAGD8/PzMSOS9fXl1fXV1dXV9dcV9xXXErAF9eXRESARc5ETMRMzMRMxEzMTABIREhESERIREhESERIREhBWYBK/7V/sv9vf7KATYCQwE1AQr9TAGqAnf9iQW2/cMCPQAAAQCg/m8FwQReAA8AZkA8AQ0NDgUCCgcICAoOAxARCAoBDGBZASQfIEi6AcoBAmYB9gECAwEkDUkPAQEKBgEBCgMPDw4VCgVfWQoVAD8rABg/PzMSOS9fXl0rX11dKysAGBDGERIBFzkRMxEzMxEzETMxMAERIREhESERIREhESERIREB0QGqATEBFf7t/s3+Vv7PBF7+UgGu/IH9kAGRAc3+MwReAAAAAQC4AAAGrAW2AA0AfEBKAA8KBgYHAgsDBwMODwAKAQwGCgVpWUYKAdYKARIKAQMhCgGxCgEEowoBTAoBOwoBGQoBAw8KjwoCCQYKCgcMCAMDBxIMAWlZDAMAPysAGD8zPxESOS9fXl1fXV1dXV9dcV9xXXErAF9eXRESATk5ETMzETMRMxEzMTABIREhESERIREhESERIQas/rr+y/29/soBNgJDAnsEtPtMAnf9iQW2/cMCPQAAAAEAoAAABhAEXgANAGBAOQUPAQsLDAcCCAwIDg8BCmBZASQfIEi6AcoBAmYB9gECAwEkDUkPAQEKBgEBDAMNDwgMFQMGYFkDDwA/KwAYPzM/ERI5L19eXStfXV0rKxESATk5ETMzETMRMxEzMTABESERIRUhESERIREhEQHRAaoClf6c/s/+Vv7PBF7+UgGu5fyHAc3+MwReAAAAAAEAuP4ACJoFtgAgAGBAOQ0ZHR4AGRkaBhMTGh4DISICFmpZUAJgAoACwALQAvACBg8CAQsDAgIeHwoQalkKIxoeEh8caVkfAwA/KwAYPzM/KxESADkYL19eXV0rERIBFzkRMxEzETMRMxEzMTABNjMyBBIVFAIGIyImJxEWMzI2NTQmIyIHESERIREhESEFFHOmuQEikov7mW2HUIGFg6rV5Tpi/sv+D/7KBFwDIxCr/tPOw/7XoRQbARAv1ajEyBX9/AS0+0wFtgAAAAEAoP4KBtUEXgAeAFJALQYSFhcaEhITAA0NExcDHyAbEGFZDxsfGwILAxsbGBMXFRgVYFkYDwQKYVkEHAA/KwAYPysAGD8zEjkvX15dKxESARc5ETMRMxEzETMRMzEwJRQCBiMiJxEWFjMyNjU0JiMjESERIREhESERNzIWEgbVeeCVjnIteTFzf5uWBv7P/on+zwPZUJfxhESz/wCHMwEHGB6jl5Wc/ocDefyHBF7+HwSN/v0AAAACAHf/rAX6Bc0AKQA0AHhAJxcvCAAcES8kAzIAKioyISQRBTU2MiwfJyxsWQAnECcgJ4AnBBMDJ7j/wEAcCw5IJycOF
AoFa1kKBWtZChQaaVkUBA4faVkOEwA/KwAYPysAGC8rKxESADkYLytfXl0rERIAORESARc5ETMRMxEzETMRMxEzMTABFAYHFjMyNxUGIyInBiMgABEQACEyFhcHJiMgERQWMzI3JiY1NDYzMhYFNCMiBhUUFhc2NgXNYnEuQkxEPnStkWiS/sr+nQFFAT44ki5OXE7+tsixGQY/Tce/u9D+63A3PjgmPUoCpo/3cBAW8RliIgGGAVcBfQGHGRLwHf4E5vsETPN92uPy3+l7anqvMTi5AAAAAAIAXP+4BPoEcwAqADMAe0AlFzAIAB0RMCUDMgArKzIiJREFNDUyLiAoLl9ZACgQKCAoAxADKLj/wEAfCQxIKCgOFAoFX1kAChAKAg0DChQaXVkUEA4gXVkOFgA/KwAYPysAGC9fXl0rERIAORgvK19eXSsREgA5ERIBFzkRMxEzETMRMxEzETMxMAEUBgcWMzI3FQYjIicGIyIAERAAMzIWFwcmIyIGFRQWMzI3JiY1NDYzMhYHNCYjIhUUFzYE3VZOHCo7QEhUk39ihu3+5QER+Sp5MENYOG9ob2wZDCodpqWYsvEsLVpMZwH8dro0BxHTF1YiATcBCAEUATQWE+QZpriYqARPgU2nsbmlOUiDfldA//8Ad/4UBNEFywImACYAAAEHA3wCOQAAAAu2AWolHw0TJQErNQAAAP//AFz+FAPdBHMCJgBGAAABBwN8AaAAAAALtgFZJB4CByUBKzUAAAAAAQAp/lYEeQW2AAsAN0AcCgIABQIDAwUHAwwNAyILBwgHaVkIAwUAalkFEgA/KwAYPysRADMYPxESARc5ETMRMxEzMTABIREhESERIREhESEC7AEr/tX+yv5zBFD+cwEK/UwBqgS0AQL+/gAAAQAv/m8EPQReAAsAOEAcAQUDCAUGBggKAwwNBggCCgsKYFkLDwgDX1kIFQA/KwAYPysRADMYEMYREgEXOREzETMRMzEwARUhESERIREhESE1BD3+kgES/u7+z/6RBF7l/Wb9kAGRA3nlAAD//wAAAAAE/gW2AgYAPAAAAAEAAP4UBJgEXgAOACZAEQ0QAwABAQ8QCAMOAgwDDwEbAD8/My8zEjkREgE5ETMyETMxMAEhEQEhExYXFzM2NxMhAQLl/s3+TgFQsBofDQwkIrIBTv5N/hQB7ARe/ghJjzy0YAH4+6IAAQAAAAAE/gW2ABAARkAjAhIPBAgIDQkGCQsDERIDDgkPBwsMC2lZBAwMAAAJAQ8DCRIAPz8zEjkROS8zKxEAMxESOTkREgEXOREzMxEzMhEzMTABASEBFSERIREhESERITUBIQJ/ATEBTv4bAT/+wf7M/sEBP/4bAVADXAJa/IMp/v7+8gEOAQIfA4cAAAAAAQAA/hQEmAReABQAQEAfExYJAQMGBBQDAwgEBBUWDgcSCQ8EGwIGBwZgWRQHFQA/MysRADMYPz8zEjkREgE5ETMzETMRMxEzMhEzMTAhFSERIREhNSEBIRMWFxczNjcTIQEECP7d/s3+3QEj/k4BULAaHw0MJCKyAU7+TeX++QEH5QRe/ghJjzy0YAH4+6IAAQAA/lYFyQW2AA8ARkAkDgMKCAwJDwYCAwMGCQgEEBEPBgwGDAgNCgMIEgMiBQBqWQUSAD8rABg/Pz8zEjk5ERI5ERIBFzkRMxEzETMRMxEzMTABIREhESMBASEBASEBASEBBKQBJf7Vqv6s/qz+tAHl/joBVgE7ATUBTv41AQr9TAGqAin91wLyAsT98gIO/SsAAAEACv5vBQIEXgAPAEZAJAUJAQ8GAw0JCgoNAA8EEBEGDQMNAw8BCiIEAQ8PFQwHX1kMFQA/KwAYPz8zPxESOTkREjkREgEXOREzETMzETMRMzEwAQEhExMhARMhESERIwMDIQGF/pgBWtnbAVr+lOcBAv7utevs/qYCOwIj/pwBZP3d/qT9kAGRAX/+gQAAAAEAKf5WB0gFtgAPAEJAIgUIBwALCA0ODggAAgQQEQ4iBgIDAmlZCQMDCwcAB2lZABIAPysRADMYPzMrEQAzGD8REgEXOREzETMRMxEzMTAhESERIREhESERIREhESERAZj+kQQ7/mkCGgE2ASv+1QS0AQL+/vxOBLT7VP1MAaoAAQAv/m8GNwReAA8AREAiAQQOAwwHBAkKCgQMAxARCgwCDg8OYFkFDw8HAwwDYFkMFQA/KxEAMxg/MysRADMYEMYREgEXOREzETMRMzMRMzEwARUhESERIREhESERIREjNQOF/tkBlgExARL+7vwI/gRe5f1sA3n8gf2QAZEDeeUAAAAAAQBt/lYGRgW2ABcARUAmDwwAFQUCAwMFDAMYGQkSaVkACRAJAhMDCQkFFg0DAyIFAGpZBRIAPysAGD8/MxI5L19eXSsREgEXOREzETMzETMxMAEhESERIREGBiMiJjURIREUFjMyNjcRIQUbASv+1f7Kms1d0eMBNWJ1UqN3ATYBCv1MAaoCNTQmybYCXP38amshKQKPAAABAHv+bwWyBF4AFgA+QB8BFQkGDgsMDA4VAxcYDA4SA19ZEhIOBxYPDglfWQ4VAD8rABg/MxI5LysAGBDGERIBFzkRMxEzMxEzMTABERQzMjY3ESERIREhESERBgYjIiY1EQGsh1iXTQExARL+7v7ParZVt8gEXv5nkiggAeP8gf2QAZEBvDguu60BoAAAAAABAG0AAAUbBbYAGQBOQCkIBQ4YGAsZExAUFBkFAxobGRQXAgwOBgsCC2lZoAKwAgICAhQRBgMUEgA/PzMSOS9dKxESADk5ETMSORESARc5ETMzETMzETMRMzEwAQcjIiY1ESERFBYXETMRNjcRIREhEQYHESMCcSgo0eMBNWJthVmWATb+yoFuhQHdAsm2Alz9/G5lAgFI/sINMwKP+koCNS0Y/rwAAAABAHsAAASgBF4AGQBIQCQIBQ4YGAsZExAUFBkFAxobGRQXAgwOBgoCCl9ZAgIUEQYPFBUAPz8zEjkvKxESADk5ETMSORESARc5ETMzETMzETMRMzEwAQYjIiY1ESERFDMzETMVNjcRIREhEQYHFSMCRhkzt8gBMYcTfU5eATH+z2lDfQFaBLutAaD+Z5IBAPEQKQHj+6IBvDYT8gABALgAAAVmBbYAEwAtQBYCEhITCQoTChQVBQ5pWQUFEwADChMSAD8zPxE5LysREgE5OREzETMRMzEwEyERNjYzMhYVESERNCYjIgYHESG4ATaT1lvO5v7LYnVPp3b+ygW2/cszJ8e4/aQCBGprICr9cQAAAAEAoAAABMUEXgARAC1AFgoGBgcRAAcAEhMNA19ZDQ0HCA8ABxUAPzM/EjkvKxESATk5ETMRMxEzMTAhETQjIgcRIREhETY2MzIWFREDk4eQq/7PATFqtFe3yAGkh0j+HQRe/kQ4Lrut/mAAAAIAAP/sBvIFzQAhACgAc0BAFA4OBB4lHx8XCh4mCiYpKhAQFh8LFgtpWSXqFvoWAn8WjxYCWBYBHBYBAw4WAQwEFhYHGhoiaVka
BAcAaVkHEwA/KwAYPysREgA5GC9fXl1fXV1dXTMrEQAzETMYLxESATk5ETMRMzMRMxEzMhEzMTAlMiQ3EQYEIyAAAyMiJjU0NzMGBhUUMzMSACEgABEVIRYWEyIGByE0JgRiigFMbn3+46z+wv6CHT+jpTXqCBNgKSUBZAElAVwBW/vVDdKVn8UMAuW27l1E/upLQgFVATaKenRZEUgeWAEcATj+df58R8HIA92zn7CiAAIAAP/sBWAEcwAeACUAa0A7HBcXDwcIAAcjIwAVAyYnGRkACBUPFQENBQAVZlkipQABaQABDAAcAAIQAwAAEgMDH19ZAxASC2BZEhYAPysAGD8rERIAORgvX15dXV0zKwBfXl0RMxEzGC8REgEXOREzETMRMzIRMzEwATYkMzIAFRUhFhYzMjY3FQYGIyAAJyA1NDczBhUUMyUiBgchJiYBTiEBFtvyAQ79GQWVh2q7Yk6+hv79/s8T/rgpzRlgAiVefAkBwwJ3Aq7b6v7z75SCkist7CcoAQXy4GBFNzVO7HN5cHwAAAACAAD+VgbyBc0AJAArAH5ARw0HBx8XKBgYEAMjJBcpKSQDAywtCQkPGAQPBGlZKOoP+g8Cfw+PDwJYDwEcDwEDDg8BDAQPDwATJCITJWlZEwQiG2lZACITAD8zKwAYPysAGD8REjkvX15dX11dXV0zKxEAMxEzGC8REgEXOREzETMRMzMRMxEzMhEzMTAFJgADIyImNTQ3MwYGFRQzMxIAISAAERUhFhYzMiQ3EQYGBxEhEyIGByE0JgO89v7bGj+jpTXqCBNgKSUBZAElAVwBW/vVDdK8igFMbm3Wfv7Xf5/FDALltggoAUkBDop6dFkRSB5YARwBOP51/nxHwchdRP7qQD4J/mQGdbOfsKIAAAIAAP5vBWAEcwAgACcAdkBBCgUFHBQVDR8gFCUlIA0DBCgpBwcNIBUDDwMBDQUNA2ZZJKUNAWkNAQwNHA0CEAMNDQAQECFfWRAQAB4eGF9ZHhYAPysRADMYPysREgA5GC9fXl1dXTMrAF9eXREzGC8RMy8REgEXOREzETMRMxEzMhEzMTAFJiYnIDU0NzMGFRQzMzYkMzIAFRUhFhYzMjY3FQYHESETIgYHISYmAt210Q/+uCnNGWARIQEW2/IBDv0ZBZWHartif7L+7YVefAkBwwJ3Aij4xeBgRTc1Ttvq/vPvlIKSKy3sPwz+fwUrc3lwfP//AEIAAALbBbYCBgAsAAD//wAAAAAHiweRAiYBsAAAAQcCNgF1AVIAFbQBFQUmAbj//bQYEhEKJQErNQArNQD//wAAAAAG/AY/AiYB0AAAAQcCNgEvAAAAC7YBABgSDgclASs1AAAAAAEAuP4ABa4FtgAfAEZAIwoQCxcHAwMEEB0EHSAhBwsLAGxZCwsEBRQaalkUHAkFAwQSAD8/Mz8rERIAORgvKxEAMxESATk5ETMRMxEzMzMRMzEwASIHESERIRE3ASEBMzIEEhUUAgYjIiYnERYzMjY1NCYCqEtv/soBNpEBiQFY/b8EyAEvlIv7mW6LSoGFjp/iAhkZ/gAFtv1AzwHx/VCc/uTBw/7XoRYZARAvzbDDyQAAAAEAoP4KBPgEXgAdAEZAIxoAGw0GGBQUFQANFQ0eHxgbGxBdWRsbFRkWDxUVBAphWQQcAD8rABg/PzMSOS8rEQAzERIBOTkRMxEzETMzETMRMzEwJRQCBiMiJxEWFjMyNjU0JiMiBgcRIREhEQEhATIABPh54JWOci15MXR+npkyeh/+zwExAbIBWP4n5QERRLP/AIczAQcYHqWVlJ0VDP6oBF7+EwHt/gz+2wABABD+VgaLBbYAFwA7QB8DAAUBBAQFDgMYGQMiBQBqWQUSFgdpWRYDDBFpWQwTAD8rABg/KwAYPysAGD8REgEXOREzETMzMTABIQMhEyERIQcCAgYnIic1FjMyNhISEyEFPQFOsv6ovP7L/poQPl+2m1RAOjM1PjdbIAObAQr9TAGqBLSG/gH+Y6gCFv4UYQEHAlcBCwAAAAEAAP5vBbQEXgAVADpAHg0DBAAFAQQFBBYXBQBfWQUVAxQHYFkUDwsQYVkLFgA/KwAYPysAGC8/KxESATk5ETMRMxEzMjEwJSEDIRMhESECAgYjIic1FjMyNhITIQSJASuR/t2J/s/+5yBcmXxqRDExOU09FgNO3/2QAZEDef6J/o+lIPQUpAF/AU8AAQC4/gAFZgW2ABcAekBJBhQQEBEAFQ0RDRgZABQBDAYUD2lZRhQB1hQBEhQBAyEUAbEUAQSjFAFMFAE7FAEZFAEDDxSPFAIJBhQUERIECmpZBBwWEgMREgA/PzM/KxESADkYL19eXV9dXV1dX11xX3FdcSsAX15dERIBOTkRMzMRMxEzMzEwJRQCBiMiJxEWFjMyNjURIREhESERIREhBWaG96G/hUuEUn6O/b3+ygE2AkMBNVqx/uyVLwEQGhXBrAH6/YkFtv3DAj0AAQCg/goErAReABYAXEA3DwUBAQIJBhUCFRcYBQBgWQUkHyBIugXKBQJmBfYFAgMFJA1JDwUBCgYFBQIHAw8CFQwSYVkMHAA/KwAYPz8zEjkvX15dK19dXSsrERIBOTkRMzMRMxEzMzEwAREhESERIREhERQAIyImJxEWMzI2NxEB0f7PATEBqgEx/vjoTHZAcHJsbwQBzf4zBF7+UgGu+7n3/uoYIAEGOpSNAZ4AAAABALj+Vga0BbYADwCDQE8DBQwICAkADQUBBAQFCQMQEQAMAQwGDAdpWUYMAdYMARIMAQMhDAGxDAEEowwBTAwBOwwBGQwBAw8MjwwCCQYMDAUOCgMJEgMiBQBqWQUSAD8rABg/Pz8zEjkvX15dX11dXV1fXXFfcV1xKwBfXl0REgEXOREzETMzETMRMxEzMTABIQMhEyERIREhESERIREhBWYBTrL+qLz+y/29/soBNgJDATUBCv1MAaoCd/2JBbb9wwI9AAAAAAEAoP5vBdcEXgAPAGpAPggJAQ0NDgUCCgYJCQoOAxARCAoBDGBZASQfIEi6AcoBAmYB9gECAwEkDUkPAQEKBgEBCgMPDw4VCgVfWQoVAD8rABg/PzMSOS9fXl0rX11dKysAGBDGERIBFzkRMxEzMxEzETMRMzEwAREhESERIQMhEyERIREhEQHRAaoBMQErkf7dif7P/lb+zwRe/lIBrvyB/ZABkQHN/jMEXgABAG3+VgUbBbYAFwBFQCYPDAIDABUFBQMMAxgZCRJpWQAJEAkCEwMJCQEWDQMDIgEEalkBEgA/KwAYPz8zEjkvX15dKxESARc5ETMzETMRMzEwISERIREzEQYGIyImNREhERQWMzI2NxEhBRv+/v7V95rNXdHjATVidVKjdwE2/lYCtAErNCbJtgJc/fxqayEpAo8AAQB7/m8EoAReABYAUEAtARULDAkGDg4MFQMXGAwKQBIDX1kPEh8SLxJvEn8SBQ0DEhIKBxYPCg1fWQoVAD8rABg/MxI5L19eXSsAGhgQzRESARc5ETMzETMRMzE
wAREUMzI2NxEhESERIREzNQYGIyImNREBrIdYl00BMf78/u3marZVt8gEXv5nkiggAeP7ov5vAnDdOC67rQGgAAAAAAEAuP5WCCEFtgAYAElAJxASAgUFBg0WFxMDEg4RERIGAxkaCRcCAxILBwMABhIQIhINalkSEgA/KwAYPz8zPzMSFzkREgEXOREzERczMxEzETMRMzEwIQEjEhURIREhATMBIREhAyETIRE0NhMjAQMj/qAJE/7rAaYBWgYBbwGmAU6y/qi8/t8DDAn+hwR7/qJ1/VgFtvuiBF77VP1MAaoCtDGAART7hwAAAAABAKD+bwdMBF4AHABAQCEDBBARAAUBBAQFEQMdHhcPBgMSAwULERUbEg8FAF9ZBRUAPysAGD8zPzMQxhIXORESARc5ETMRMxEzETMxMCUhAyETIREHBgcDIwMmJycRIREhExYWFz4CEyEGIQErkv7eif7jEDYrxtnJKzET/uQBpMAeMwkhJSyxAaDf/ZABkQNxPtNs/gwB+G7HRPyPBF7+I03IR5aDbgGyAAD//wBCAAAC2wW2AgYALAAA//8AAAAABYUHkQImACQAAAEHAjYAdQFSABNACwIAFA4FBiUCEQUmACs1ASs1AAAA//8AVv/sBFwGPwImAEQAAAEGAjYpAAALtgIuKSMIGCUBKzUA//8AAAAABYUHVgImACQAAAEHAGoAVgFSABdADQMCAw4gBQYlAwIjBSYAKzU1ASs1NQAAAP//AFb/7AQ7BgQCJgBEAAABBgBq+wAAELEDArj/7bQjNRIXJQErNTX//wAAAAAHJQW2AgYAiAAA//8AVv/sBv4EdQIGAKgAAP//AHYAAARBB5ECJgAoAAABBwI2AA4BUgAVtAEPBSYBuP//tBIMAgslASs1ACs1AP//AFz/7ARiBj8CJgBIAAABBgI2HQAAC7YCCyIcChElASs1AAACAKT/7AYSBc0AFAAbAEtAKQMZEAoSGBAYHB0RGWlZehEBaREBAxEBCwMREQ0HBwBpWQcEDRVpWQ0TAD8rABg/KxESADkYL19eXV1dKxESATk5ETMzETMzMTABIgQHETYkMyAAERAAISAAETUhJiYDMjY3IRQWAzOU/sFwiwEXowFaAYP+lP60/qj+ogQrDdOVo8ML/Rq0BMtbRwEMU0X+bv6e/p7+dQGHAYdIwMn8I7abr6IAAAAAAgBY/+wEXgRzAAYAGwBVQDIYBBAKEhIDEAMcHREEZlkPER8RAs8R3xECGREBAw8RAQsGERENBwcVYFkHEA0AX1kNFgA/KwAYPysREgA5GC9fXl1fXV1xKxESARc5ETMRMzMxMCUyNjchFhYTIAAREAAjIgA1NSEmJiMiBgc1NjYCTFl1Cf5UAm85ARABLf7q+uz+9gLRBZCCX7JpVb/FcXpwewOu/tP+8f7o/s0BC/CUgpImMuwsJAAAAP//AKT/7AYSB1YCJgLeAAABBwBqAO4BUgAasQMCuP/bQAocLgQKJQMCMQUmACs1NQErNTX//wBY/+wEXgYEAiYC3wAAAQYAav8AABCxAwK4/+S0HC4ZCiUBKzU1//8AAAAAB4sHVgImAbAAAAEHAGoBWAFSABdADQIBJwUmAgEAEiQRCiUBKzU1ACs1NQAAAP//AAAAAAb8BgQCJgHQAAABBwBqARAAAAANtwIBABIkDgclASs1NQD//wBe/+wE1wdWAiYBsQAAAQcAagAtAVIAF0ANAgE8BSYCAQAnOSEHJQErNTUAKzU1AAAA//8ATv/sBCMGBAImAdEAAAEGAGrOAAANtwIBDyk7ChAlASs1NQAAAAABADn/7ARqBbYAGQBJQCYBBhUFCQkVFRkCDgQaGwAGBhlsWQYGDAUCAwMCaVkDAwwSa1kMEwA/KwAYPysREgA5EjkYLysRADMREgEXOREzETMRMzMxMAEBIREhFQEWBBUUBCEgJxEWFjMyNjU0JiMjARsBaP3nA7/+UPEBAP67/tf+/cBd62inpdDPewNaAVwBAMb+ZArcxNDuTwEHLDVpcmZfAAEAOf4UBFYEXgAaAEhAJAEGBQkCDwYaCRYaFhscAAYGGV9ZBgYNAwMCXlkDDw0TXVkNGwA/KwAYPysREgA5GC8rEQAzERIBOTkRMxEzMzMRMxEzMTABASE1IRUBFhYVFAYEIyInERYWMzI2NTQmIyMBGwGV/bIDx/5G7fqP/u7B+8Bc42WepsrGdgH2AX/pxv5iGv7gl994UAEGLTOHf4qDAAD//wC4AAAF3Qb+AiYBsgAAAQcBTQDbAVIAFbQBEwUmAbj//7QTEg8IJQErNQArNQD//wCgAAAFIwWsAiYB0gAAAQYBTXUAAAu2AQEREA0GJQErNQD//wC4AAAF3QdWAiYBsgAAAQcAagDdAVIAF0ANAgElBSYCAQAQIg8IJQErNTUAKzU1AAAA//8AoAAABSMGBAImAdIAAAEGAGp1AAANtwIBAQ4gDQYlASs1NQAAAP//AHf/7AXnB1YCJgAyAAABBwBqAMMBUgAXQA0DAisFJgMCAhYoBgAlASs1NQArNTUAAAD//wBc/+wEmAYEAiYAUgAAAQYAagwAAA23AwIAGiwTDCUBKzU1AAAAAAMAd//sBecFzQALABIAGAB0QEkVEBAGABYPBg8ZGhUQaVkqFZoVAkYVVhUC1hUBTBUBFSETFEgVHgxJGRUBGRUBA48VAQ8VjxUCCQYVFQMJCRNpWQkEAwxpWQMTAD8rABg/KxESADkYL19eXXFfXXErK11dcXErERIBOTkRMzMRMxEzMTABEAAhIAAREAAhIAABMjY3IRYWEyADISYmBef+mP6w/rD+mAFpAVEBUQFl/UijvRP9GBS3rP7ENwLgGbcC3f6V/noBhgFtAW0Bgf58/KfAvbTJA9v+pKmzAAAAAwBc/+wEmARzAA0AEwAZAGtAQRYREQcAFxAHEBobFhF7WRYiIiNIFiIZGkg5FkkWAqkWAV0WAUwWAQMcFgEEFhYDCg8UAQwGChRdWQoQAw5dWQMWAD8rABg/KwBfXl0REjkYL19dX11dXXErKysREgE5OREzMxEzETMxMAEQACEiJgI1EAAhMhYSATI3IRYWEyIHISYmBJj+4P7/ofaEAR4BA6H2hP3jwxz+Pg9uZMMeAcIObQIx/u/+zI0BCLABEgEwjP76/gDodHQCnOFwcQAAAP//AHf/7AXnB1YCJgJ7AAABBwBqAMUBUgAXQA0EAy4FJgQDBBkrBgAlASs1NQArNTUAAAD//wBc/+wEmAYEAiYCfAAAAQYAagwAAA23BAMAGiwHACUBKzU1AAAA//8ASP/sBNcHVgImAccAAAEHAGoAIwFSABdADQIBLgUmAgEBGSsECSUBKzU1ACs1NQAAAP//AEr/7AO8BgQCJgHnAAABBgBqlwAADbcCAQAYKg8VJQErNTUAAAD//wAU/+wFTgb+AiYBvQAAAQcBTQBGAVIAE0ALARgFJgEDGBcOACUBKzUAKzUAAAD//wAA/hQEjQWsAiYAXAAAAQYBTdwAAAu2AQMaGQAJJQErNQ
D//wAU/+wFTgdWAiYBvQAAAQcAagBGAVIAF0ANAgEqBSYCAQMVJw4AJQErNTUAKzU1AAAA//8AAP4UBI0GBAImAFwAAAEGAGrcAAANtwIBAxcpAAklASs1NQAAAP//ABT/7AVOB3MCJgG9AAABBwFTAM0BUgAXQA0CAScFJgIBdhYkDgAlASs1NQArNTUAAAD//wAA/hQEjQYhAiYAXAAAAQYBU1IAAA23AgFmGCYACSUBKzU1AAAA//8AbQAABRsHVgImAcEAAAEHAGoAVgFSABdADQIBKQUmAgEAFCYJEyUBKzU1ACs1NQAAAP//AHsAAASgBgQCJgHhAAABBgBqIwAADbcCAQMTJRIJJQErNTUAAAAAAQC4/lYEVAW2AAkAL0AYAwgFBgEGCAMKCwYiCQJpWQkDCANqWQgSAD8rABg/KwAYPxESARc5ETMRMzEwAREhESERIREhEQRU/ZoBK/7V/soFtv8A/FT9TAGqBbYAAAAAAQCg/m8DpAReAAkAMkAYAQUDCAUGCAYKCwYICQJgWQkPCANfWQgVAD8rABg/KwAYEMYREgE5OREzETMRMzEwARUhESERIREhEQOk/i0BEv7u/s8EXuX9Zv2QAZEEXgD//wC4AAAGhwdWAiYBxQAAAQcAagE1AVIAF0ANBAMtBSYEAwMYKgUXJQErNTUAKzU1AAAA//8AoAAABi0GBAImAeUAAAEHAGoA+gAAAA23BAMBFykJFiUBKzU1AP//AC/+FARQBbYCJgKYAAABBwN9AOwAAAAWsQEWuv/AAA0BALZIABYWBwclASsrNQABAAT+KQO+BF4AGwBvQD0VGQIMChMXFw4KGQgRCAoDHB0WDA0MglkTDw0fDb8NAxMDDQ0KDwAFX1kACg8SAQwGDxJdWQ8PChdfWQoVAD8rABg/KwBfXl0YEMQrERIAORgvX15dMysRADMREgEXOREzETMzETMRMzMRMzEwASInNRYzMjY1NSERIzUzESEVIRUhFSEVMxEUBgGaXUcyMDMz/s+JiQMx/gABTP60/pT+KRvVEzNEgwGi6wHR+Nnrw/6esaMAAQAA/hQFsgW2ABgAUUAqEhYCEw4MEA0TChYHBwoNDAQZGhMKEBAKCREOAwwSCRRpWQkSAAVrWQAjAD8rABg/KwAYPz8zEjk5ERI5ERIBFzkRMxEzETMRMxEzETMxMAEiJzUWMzI1NSMBASEBASEBASEBASERFAYEUmtNOzt7l/6s/qz+tAHl/joBVgE7ATUBTv41AUIBCLf+FBnwE6pMAin91wLyAsT98gIO/Sv+H/6HscIAAQAK/ikE4wReABkAUUAqExcCFA8NFBELFwgICw4NBBobFAsREQsKDwAFX1kAChIPDw0VChVfWQoVAD8rABg/PzMQxCsREgA5ORESORESARc5ETMRMzMRMxEzETMxMAEiJzUWMzI2NTUjAwMhAQEhExMhARMzERQGA8FdRzIwMzOq6+z+pgF7/pgBWtnbAVr+lOfjlP4pG9UTM0SDAX/+gQI7AiP+nAFk/d3+pP6esaMAAAAAAQAAAAAFVgW2ABEAZ0A8BgsLEwIPCg0HBAkEDQEQEQ8HEhMNBA8CChEAEWlZBxkAKQACACQUSaoAAUwAAQM+AAEEAAAPBQIDDA8SAD8zPzMSOS9fXV9dXStxMysRADMREjk5ERIBFzkRMxEzETMRMxEzMTATIQEhAQEhASEVIQEhAQEhASFxASn+hQFWATsBNQFO/osBJ/7TAZ7+nv6s/qz+tAGN/uQDaAJO/fICDv2y/v2WAin91wJqAAABAAoAAASWBF4AEQBnQDsJChEQAg8PEAcECg0GCwsNBAEQBRITDQQPAgoRABFgWQdvAAF/AI8A3wADygABACQNSQAADwUCDwwPFQA/Mz8zEjkvK11dcTMrEQAzERI5ORESARc5ETMRMxEzETMRMxEzETMxMBMzASETEyEBMxUjASEDAyEBI2bX/uABWtnbAVr+29nRAS7+pevs/qYBK88CqAG2/pwBZP5K5f49AX/+gQHDAAACAFwAAARiBbYACQASADBAGA4ABwQSABITFAMLaVkDAwgFAwgRaVkIEgA/KwAYPxI5LysREgE5OREzMxEzMTATNCQhMxEhESEgASMiBhUUFjMzXAE4AR57ATX+Vv2kAtFQtJOSnWgBydToAjH6SgKHWWVmZQAAAP//AFz/7ARxBhQCBgBHAAAAAgBc/+wGugW2ABsAJgBYQC8gAAcEJhANDSYAAycoDw4vDgINAw4WGQUDHWlZAAMBDAMDAxkFAwojGSNpWRMZEwA/MysRADMYPxI5L19eXSsREgA5GC9fXl0REgEXOREzETMzETMxMBM0JCEzESERFhYzMjY1ESERFAYjIiYnBgYjIiYBIyIGFRQWMzI2NVwBKgELcwE1A09WWk4BMfDtbMEnK6596O8CqEidiV1bVGIBttj3AjH7uUJBZnEBjf4tw85OPT9K6wGuaWxgZkE7AAIAXP/sBskGFAAgACwASEAmKgMPDAkkGBUVJAMDLS4WHggABg0ABihdWQYQGxJhWQAhXVkbABYAPzIrKwAYPysAGD8REjk5LxESARc5ETMRMzMzETMxMAUiABEQEjMyFzMmJjURIREUFjMyNjU1IREUBiMiJicGBicyNjc1NCYjIhEUFgJe9/712cPLagoHDwExUFhXSwEt6+h4mD4uxFpvZgRqccliFAEoARkBEAE2pCaPKgFm+2lLRmZx+f7BxM09TDdS84miIbaa/q6lpQABABn/7AagBcsAKABUQC0DBAQAHhANDR4aJAQpKg8OLw4CDQMOAxobGxpsWRsbEyYmIWtZJgQTCmlZExMAPysAGD8rERIAORgvKxESADkYL19eXRESARc5ETMRMzMRMzEwARQGBxUWFhUUFjMyNjURIREUBiMiJjU0JiMjNTMgNTQmIyIHJzYhMgQD9KaWsbZTVVlPATHw6er0w7mqqgFYa3GcmZvIAR/mAQ4Eb4nAJAYWq5FlWWZxAY3+LcXM5NpqbdnRTlhkzpC7AAABADn/7AZcBHMAKABcQDUSExMQAx0aGgMnCgQpKhsSJygoJ2JZACgQKFAoYCiAKJAoBgsDKCggDQ0GYFkNECAXYVkgFgA/KwAYPysREgA5GC9fXl0rERIAORgvERIBFzkRMxEzMxEzMTABMjY1NCYjIgYHJzY2MzIWFRQHFRYVFDMyNjU1IREUBiMiJjU0JiMjNQGTnodlck2yT1p414TL8tHtqFdLAS3r5N38koiaArA4PTY2JSLVLiagib05Cie9emZx+f7BxcyZjWVm0wAAAQAZ/lYFcwXLAB8AT0AqAwQEABYIDQoLCw0WEhsFICEDEhMTEmxZExMgHQsiHRhrWR0EDQhqWQ0SAD8rABg/KwAYPxESOS8rERIAORESARc5ETMRMxEzMxEzMTABFAYHFRYWFRUhESERIRE0JiMjNTMgNTQjIgcnNiEyBAQdppaxtgEr/tX+ytHItrYBde6npZvRASrxARgEb4nAJAYWq5Gg/UwBqgGqa
m3Z0aZkzpC7AAAAAQBO/m8FLQRzACIAXUAyEhMTHBADFxwZGhocAyEKBSMkGhwSISIiIWJZACIQIgILAyIiHA0NBmBZDRAcF19ZHBUAPysAGD8rERIAORgvX15dKxESADkYEMYREgEXOREzETMRMxEzETMxMAEyNjU0JiMiBgcnNjYzMhYVFAcVFhYVFSERIREhETQmIyM1AbKqkGp6TcNQWnfgitH80YFvARL+7v7XmaGkArA4PTY2JiHVLSegib05CiJ9ZWf9kAGRAUZOSdMAAAABABD/7AeWBbYAIQA6QB8ADwkGBg8YAyIjDwcvBwINAwcgEWlZIAMWG2lZDBYTAD8zKwAYPysAGC9fXl0REgEXOREzETMxMAEWFjMyNjURIREUBiMiJjURIQcCAgYjIic1FjMyNhISEyEFFAJPV1pOATLw6uvy/sMQPl+znlRAOjM1PjdbIANyAXdIQ2ZxAY3+LcXMyMMDPYb+Af5lqBb+FGEBBwJXAQsAAAAAAQAA/+wG4QReAB8ANUAbFwAPCQYPBiAhBx4RYFkeDwwDYVkVGmFZDBUWAD8zKysAGD8rABgvERIBOTkRMxEzMjEwARQWMzI2NTUhERQGIyImNREjAgIGIyInNRYzMjYSEyEEalBYV0sBLevk6+76IFyZfGpEMTE5TT0WAy8BeUpDZnH5/sHFzMjFAgD+if6PpSD0FKQBfwFPAAAAAQC4/+wHqgW2ABkAh0BSFhISEwAXDwkGBg8TAxobHwc/BwIDBwAWAQwGFhFpWUYWAdYWARIWAQMhFgGxFgEEoxYBTBYBOxYBGRYBAw8WjxYCCQYWFhMYFAMTEgwDaVkMEwA/KwAYPz8zEjkvX15dX11dXV1fXXFfcV1xKwBfXl0YL19dERIBFzkRMxEzMxEzETMxMAEUFjMyNjURIREUBiMiJicRIREhESERIREhBT1JVVVJATHr5ObrAv3m/soBNgIaATUBfUtGZnEBjf4txM3IwQEC/YkFtv3DAj0AAAABAKD/7AcEBF4AGQBkQDsBFxcYBQIUDgsLFBgDGhsMARZgWQEkHyBIugHKAQJmAfYBAgMBJA1JDwEBCgYBARgDGQ8YFREIYVkRFgA/KwAYPz8zEjkvX15dK19dXSsrABgvERIBFzkRMxEzMxEzETMxMAERIREhERYWMzI2NTUhERQGIyImJzUhESERAdEBlQEyAk5RVUkBLeni5+oC/mv+zwRe/lIBrv0ZSENmcfn+wcbLycJW/jMEXgAAAAEAd//sBfAFywAdAEVAJQ4CFQgCHBwdCAMeHwAdaVkPAAELAwAABQwMEmlZDAQFGGlZBRMAPysAGD8rERIAORgvX15dKxESARc5ETMRMxEzMTABIRUQACEgABE0EiQzIBcHJiYjIgIVFBYzMjY1NSEDNQK7/q/+u/6c/oGvAU3jARTka3K/aL3X2dOarv6LAzV7/pr+mAGKAWflAVS1a/o5Kv746uv+p5cHAAABAFz/7ATyBHMAGQBJQCcMAhkTEwcCGAcYGhsAGYJZDwAfAAITAwAABAoKEF1ZChAEFV1ZBBYAPysAGD8rERIAORgvX15dKxESATk5ETMRMxEzETMxMAEhFRAhIAAREAAhMhcHJiYjIgYVECEyNjUhApYCXP28/ub+yAFFASzixFxLtUijmwEVgZP+3AKYXf2xASoBEQEcATBW6iMnp7P+unRjAAEAKf/sBWIFtgAVAEBAIhQGAA8JBgYPEQMWFw8HLwcCDQMHFRESEWlZEgMMA2lZDBMAPysAGD8rEQAzGC9fXl0REgEXOREzETMRMzEwARQWMzI2NREhERQGIyImNREhESERIQLsS1ZYTAEx7ebr7v5zBFD+cwF9S0ZmcQGN/i3FzMu+Az8BAv7+AAAAAQAv/+wFRgReABUANkAbAQkDEgwJCRIUAxYXCgIUFRRgWRUPDwZhWQ8WAD8rABg/KxEAMxgvERIBFzkRMxEzETMxMAEVIREUFjMyNjU1IREUBiMiJicRITUEPf6SUFhWTAEt6+Tp7gL+kQRe5f4ES0Zmcfn+wcXMx8QCAuUAAAABAFj/7ATRBcsAKABqQD0mJSUNFiINAAcdHRIAIgQpKiYTEBATa1mZEKkQAlYQARAkDUkqEAEDDxABCgUQEB8EBAppWQQEHxlpWR8TAD8rABg/KxESADkYL19eXV9dK11dKxESADkREgEXOREzETMRMxEzETMxMBM0NjYzMgQXByYjIgYVFBYzMxUjIgYVFBYzMiQ3EQYhICQ1NDY3NSYmhYr6n7ABA3aHwM6FhdXoeonq66aqgAEJYcH+v/7f/rbMt5+3BGBpp1tDT+V3UUtmWPJoYWdhMS/+7U/qypK3EwYZuf//AE7/7AQlBHMCBgGCAAAAAQAQ/hQGYgW2ACEAQEAjAh0KHwgIChMDIiMbDGlZGwMKHWlZChIRFmlZERMABWtZACMAPysAGD8rABg/KwAYPysREgEXOREzETMzMTABIic1FjMyNjU1IREhBwICBiciJzUWMzI2EhITIREhERQGBQJrTTs8QDr+zf6aED5ftptUQDozNT43WyADmwElt/4UGfATVlRMBLSG/gH+Y6gCFv4UYQEHAlcBC/tK/oexwgABAAD+KQWFBF4AHwBBQCISAhsKHQgKCCAhChtfWQoVAAVfWQAgGQxgWRkPEBVhWRAWAD8rABg/KwAYEMQrABg/KxESATk5ETMRMzMyMTABIic1FjMyNjU1IREhAgIGIyInNRYzMjYSEyERMxEUBgRiW0k0LzMz/tH+5yBcmXxqRDExOU09FgNO/JT+KRvVEzNEgwN5/on+j6Ug9BSkAX8BT/yB/p6xo///AAD+UgWFBbwCJgAkAAABBwJkBUQAAAALtgIADhIEByUBKzUAAAD//wBW/lIEOwR1AiYARAAAAQcCZATHAAAADrkAAv/+tCMnCBglASs1//8AAAAABYUH9gImACQAAAEHAmMFIwFSABNACwIAEhEFBiUCEgUmACs1ASs1AAAA//8AVv/sBDsGpAImAEQAAAEHAmMEywAAAAu2AgswIxIXJQErNQAAAP//AAAAAAWFB9ECJgAkAAABBwN0BSEBUgAXQA0DAgAUDgUGJQMCFAUmACs1NQErNTUAAAD//wBW/+wE/gZ/AiYARAAAAQcDdATFAAAAELEDArj/67QpIxIXJQErNTUAAP//AAAAAAWFB9ECJgAkAAABBwN1BR8BUgAXQA0DAgAbFQUGJQMCGwUmACs1NQErNTUAAAD////T/+wEOwZ/AiYARAAAAQcDdQTHAAAAELEDArj/77QwKhIXJQErNTUAAP//AAAAAAWFCEoCJgAkAAABBwN2BSEBUgAXQA0DAgAnIQUGJQMCJwUmACs1NQErNTUAAAD//wBW/+wEqAb4AiYARAAAAQcDdgTJAAAAELEDArj/77Q8NhIXJQErNTUAAAAEAAAAAAWFCG8ABwANABsAMwB7QAsHDwQODQg1NCUtHLj/wEAVCQxIHBwwIQ8oASgRDwAXEBeAFwMXuP/AQCISF0gf
FwEXMBRAFAIPFAEJAxQFCwQFDQJpWQ0NBQAEEgUDAD8/MxI5LysREgA5GBDWX15dccRdK3E5OS9dMzM5LyszMxESATk5ETMRMzEwIQMhAyEBIQEBAiYnBgMBIyYnBgcjNTY3IRYWFwMiLgIjIgYHIzY2MzIeAjMyNjczBgYEN2r962r+sgIEAXsCBv3+kyUIIZwCMo6NWlOVjapCARIwgDzkLFBHPxwsKA19C3NgMVFHPR4qKQp9DnIBXP6kBbz6RAJgAdl8JID+BwPLR1FKThukYEWEOwE9GyEcKS9ufhwhGywsc3kAAP//AFb/7AQ7Bx0CJgBEAAABBwN3BMUAAAAQsQMCuP/vtCkjEhclASs1NQAA//8AAP5SBYUHcwImACQAAAAnAUsAWAFSAQcCZAVEAAAAG0ASAhIFJgMBHCAlByUCAxMbBQYlKzUrNQArNQAAAP//AFb+UgQ7BiACJgBEAAAAJgFL+/8BBwJkBNMAAAAWtwMKMTUIGCUCuP/rtCgvEhclKzUrNQAA//8AAAAABYUIEwImACQAAAEHA3gFKQFSABdADQMCAxwWBQYlAwIZBSYAKzU1ASs1NQAAAP//AFb/7AQ7BsECJgBEAAABBwN4BM0AAAAQsQMCuP/stDErEhclASs1NQAA//8AAAAABYUIEwImACQAAAEHA3kFJwFSABdADQMCABwWBQYlAwIZBSYAKzU1ASs1NQAAAP//AFb/7AQ7BsECJgBEAAABBwN5BMsAAAAQsQMCuP/qtDErEhclASs1NQAA//8AAAAABYUIWAImACQAAAEHA3oFJwFSABdADQMCACchBQYlAwIkBSYAKzU1ASs1NQAAAP//AFb/7AQ7BwYCJgBEAAABBwN6BM0AAAAQsQMCuP/stDw2EhclASs1NQAAAAQAAAAABYUIbwAHAA0AGgAyAI9AFQcPBA4NCDQz5SwBtizGLNYsAyQsG7j/wEBCCQxIGxvqIAG5IMkg2SADLyAPJwEnERMAGhAaYBpwGoAaBZAaoBoCHxoBGkAPFwEJAxcFCwQFDQJpWQ0NBQAEEgUDAD8/MxI5LysREgA5GBDWX15dGs1dXXEyMy9dMzNdXTkvKzMzXV0REgE5OREzETMxMCEDIQMhASEBAQImJwYDAxYWMzI3MwYGIyImJyUiLgIjIgYHIzY2MzIeAjMyNjczBgYEN2r962r+sgIEAXsCBv3+kyUIIZwUB2pixQ6VCbilo8ELAgQtT0c/HCwoDX0Lc2AxUUc9HiopCnwLcgFc/qQFvPpEAmAB2XwkgP4HBPQ7RoGSl5+KLxshHCkvbn4cIRssLHB8//8AVv/sBDsHHQImAEQAAAEHA3sEzQAAABCxAwK4/+y0LykSFyUBKzU1AAD//wAA/lIFhQd9AiYAJAAAACcBTgBWAVIBBwJkBUQAAAAbQBICEQUmAwEcICUHJQIBFA4FBiUrNSs1ACs1AAAA//8AVv5SBDsGKwImAEQAAAAnAmQEyQAAAQYBTvsAABa3AgAjJwgYJQO4/+u0MiwSFyUrNSs1AAD//wC4/lIEAgW2AiYAKAAAAQcCZATbAAAADrkAAf/9tAwQAgslASs1//8AXP5SBGIEcwImAEgAAAEHAmQE3QAAAA65AAL//bQcIAoRJQErNf//ALgAAAQCB/YCJgAoAAABBwJjBMUBUgATQAsBEAUmASQaDAILJQErNQArNQAAAP//AFz/7ARiBqQCJgBIAAABBwJjBNsAAAALtgI4KhwKESUBKzUAAAD//wC4AAAEAgdgAiYAKAAAAQcBUv/vAVIAE0ALARAFJgEAEBwCCyUBKzUAKzUAAAD//wBc/+wEYgYOAiYASAAAAQYBUvsAAAu2AgkgLAoRJQErNQD//wC4AAAE9QfRAiYAKAAAAQcDdAS8AVIAF0ANAgEAEgwCAyUCARIFJgArNTUBKzU1AAAA//8AXP/sBQQGfwImAEgAAAEHA3QEywAAAA23AwIOIhwKECUBKzU1AP///80AAAQCB9ECJgAoAAABBwN1BMEBUgAXQA0CAQgZEwIDJQIBGQUmACs1NQErNTUAAAD////f/+wEYgZ/AiYASAAAAQcDdQTTAAAADbcDAhgpIgoQJQErNTUA//8AuAAABJsISgImACgAAAEHA3YEvAFSABdADQIBACUfAgMlAgElBSYAKzU1ASs1NQAAAP//AFz/7ASqBvgCJgBIAAABBwN2BMsAAAANtwMCDjUvChAlASs1NQAAAwC4AAAEAghvAAsAGQAxAMJADwYKCgEEAAAIAQMzMiIrGrj/wEAXCQxIGhouHw8mAQkmDw0AFXAVAvAVARW4/8BATRIXSA8VAQoVABIQEiASAxsDEgIGCWlZRgYB1gYBEgYBAyEGAbEGAQRMBgGjBgEGHgxJGQYBAw8GjwYCCQYGBgECAgVpWQIDAQppWQESAD8rABg/KxESADkYL19eXV9dK11dX11xX3FdcSsAGBDWX15dxF5dK11xOTkvXl0zMzkvKzMzERIBFzkRMxEzETMxMCEhESEVIREhFSERIQMjJicGByM1NjchFhYXAyIuAiMiBgcjNjYzMh4CMzI2NzMGBgQC/LYDSv3sAe/+EQIUK46NWlOVjapCARIwgDzkLFBHPxwsKA19C3NgMVFHPR4qKQp9DnIFtv7+v/7+hwUrR1FKThukYEWEOwE9GyEcKS9ufhwhGywsc3n//wBc/+wEYgcdAiYASAAAAQcDdwTLAAAADbcDAhIjKQoRJQErNTUA//8Aq/5SBBAHcwImACgAAAAnAUv/8QFSAQcCZATbAAAAHrQBEAUmArj//UAMGh4BACUBABEZAgMlKzUrNQArNf//AFz+VARiBiECJgBIAAAAJgFL8wABBwJkBN0AAgAXuQAD//1ADCouChAlAgAhKQoQJSs1KzUA//8AQgAAAtsH9gImACwAAAEHAmMD7gFSABNACwEQBSYBHBoMBgslASs1ACs1AAAA//8AdQAAAjwGpAImAPMAAAEHAmMDnAAAAAu2AQAIBwIDJQErNQAAAP//AEL+UgLbBbYCJgAsAAABBwJkBA4AAAALtgEADBADCiUBKzUAAAD//wCR/lIB3wYUAiYATAAAAQcCZAO4AAAADrkAAv/+tA0RAAQlASs1//8Ad/5SBecFzQImADIAAAEHAmQFsAAAAAu2AgAWGgYAJQErNQAAAP//AFz+UgSYBHMCJgBSAAABBwJkBPoAAAAOuQAC//+0Gh4TDCUBKzX//wB3/+wF5wf2AiYAMgAAAQcCYwWRAVIAE0ALAhoFJgIeJBYGACUBKzUAKzUAAAD//wBc/+wEmAakAiYAUgAAAQcCYwTbAAAAC7YCHSgaEwwlASs1AAAA//8Ad//sBecH0QImADIAAAEHA3QFhQFSABqxAwK4//hAChwWBgAlAwIcBSYAKzU1ASs1Nf//AFz/7AUKBn8CJgBSAAABBwN0BNEAAAAQsQMCuP/5tCAaEwwlASs1NQAA//8Ad//sBecH0QImADIAAAEHA3UFhwFSABqxAwK4//xACiMdBgA
lAwIjBSYAKzU1ASs1Nf///9//7ASYBn8CJgBSAAABBwN1BNMAAAAQsQMCuP/9tCchEwwlASs1NQAA//8Ad//sBecISgImADIAAAEHA3YFhQFSABqxAwK4//hACi8pBgAlAwIvBSYAKzU1ASs1Nf//AFz/7ASwBvgCJgBSAAABBwN2BNEAAAAQsQMCuP/5tDMtEwwlASs1NQAAAAQAd//sBecIbwALABUAIwA7AHhACwwGABEGET08LTUkuP/AQBUJDEgkJDgpDzABMBkXAB8QH4AfAx+4/8BAIxIXSB8fAR9fHG8cfxwDDxwfHAIJAxwJCRNpWQkEAw9pWQMTAD8rABg/KwAYENZfXl1xxF0rcTk5L10zMzkvKzMzERIBOTkRMxEzMTABEAAhIAAREAAhIAABFBYzIBEQISIGASMmJwYHIzU2NyEWFhcDIi4CIyIGByM2NjMyHgIzMjY3MwYGBef+mP6w/rD+mAFpAVEBUQFl+9W6uQFz/o+5vALmjo1aU5WNqkIBEjCAPOQsUEc/HCwoDX0Lc2AxUUc9HiopCn0OcgLd/pX+egGGAW0BbQGB/nz+lPX4Ae0B7vkCWUdRSk4bpGBFhDsBPRshHCkvbn4cIRssLHN5AAD//wBc/+wEmAcdAiYAUgAAAQcDdwTVAAAADbcDAgEhJxMMJQErNTUA//8Ad/5SBecHcwImADIAAAAnAmQFsAAAAQcBSwDBAVIAHkAMAyMFJgIAFhoGACUDuP/+tCQsBgAlKzUrNQArNf//AFz+UgSYBiECJgBSAAAAJwJkBPoAAAEGAUsMAAAZuQAC//+1Gh4TDCUDuP/+tCgwEwwlKzUrNQAAAP//AHf/7AbXB3MCJgJcAAABBwB2ARkBUgATQAsCVh4iBgAlAiYFJgArNQErNQAAAP//AFz/7AXNBiECJgJdAAABBgB2fQAAC7YCbyMnBwAlASs1AP//AHf/7AbXB3MCJgJcAAABBwBDAGQBUgAWuQAC/6FACSEmBgAlAiYFJgArNQErNf//AFz/7AXNBiECJgJdAAABBgBDpQAADrkAAv+XtCcrBwAlASs1AAD//wB3/+wG1wf2AiYCXAAAAQcCYwWmAVIAE0ALAjMrHgYAJQIiBSYAKzUBKzUAAAD//wBc/+wFzQakAiYCXQAAAQcCYwTnAAAAC7YCKTAjBwAlASs1AAAA//8Ad//sBtcHYAImAlwAAAEHAVIAywFSABNACwIJIi4GACUCIgUmACs1ASs1AAAA//8AXP/sBc0GDgImAl0AAAEGAVIUAAALtgIHJzMHACUBKzUA//8Ad/5SBtcGFAImAlwAAAEHAmQFsgAAAAu2AgAeIgYAJQErNQAAAP//AFz+UgXNBQYCJgJdAAABBwJkBP4AAAALtgIDIycHACUBKzUAAAD//wCu/lIFXgW2AiYAOAAAAQcCZAWHAAAAC7YBABMXCQElASs1AAAA//8Amv5SBKIEXgImAFgAAAEHAmQFHwAAAAu2AQAVGQkUJQErNQAAAP//AK7/7AVeB/YCJgA4AAABBwJjBV4BUgATQAsBFwUmARQhEwkBJQErNQArNQAAAP//AJr/7ASiBqQCJgBYAAABBwJjBPgAAAALtgEWIxUJFCUBKzUAAAD//wCu/+wHKQdzAiYCXgAAAQcAdgEXAVIAE0ALAX0dIRMAJQElBSYAKzUBKzUAAAD//wCa/+wGcwYhAiYCXwAAAQcAdgCqAAAAC7YBeB8jCRMlASs1AAAA//8Arv/sBykHcwImAl4AAAEHAEMAFAFSABa5AAH/ekAJICUTACUBJQUmACs1ASs1//8Amv/sBnMGIQImAl8AAAEGAEOjAAAOuQAB/3G0IycJEyUBKzUAAP//AK7/7AcpB/YCJgJeAAABBwJjBWQBUgATQAsBGiodEwAlASEFJgArNQErNQAAAP//AJr/7AZzBqQCJgJfAAABBwJjBP4AAAALtgEcLB8JEyUBKzUAAAD//wCu/+wHKQdgAiYCXgAAAQcBUgCaAVIAE0ALAQAhLRMAJQEhBSYAKzUBKzUAAAD//wCa/+wGcwYOAiYCXwAAAQYBUjMAAAu2AQAjLwkTJQErNQD//wCu/lIHKQYUAiYCXgAAAQcCZAV9AAAADrkAAf/2tB0hEgslASs1//8Amv5SBnMFBgImAl8AAAEHAmQFFwAAAA65AAH/+LQfIwgeJQErNf//AAD+UgT+BbYCJgA8AAABBwJkBP4AAAALtgEACQ0FBCUBKzUAAAD//wAA/hQEjQReAiYAXAAAAQcCZAZWAAAAC7YBexcXCgolASs1AAAA//8AAAAABP4H9gImADwAAAEHAmME2QFSABNACwENBSYBFhcJBwIlASs1ACs1AAAA//8AAP4UBI0GpAImAFwAAAEHAmMEogAAAAu2ARglFwAJJQErNQAAAP//AAAAAAT+B2ACJgA8AAABBwFSABIBUgATQAsBDQUmAQANGQcCJQErNQArNQAAAP//AAD+FASNBg4CJgBcAAABBgFS4AAAC7YBBxsnAAklASs1AP//AFz+vAUMBhQCJgDTAAABBwBCANkAAAALtgIYKisDFiUBKzUAAAAAAvt/BNn+5wYhAAkAEwAVQAoEDoAADwpfCgIKAC9dMxrNMjEwASYmJzUhFhYXFSEmJic1IRYWFxX+Rj7aIgEtIWQp/dFJ0R8BLSFkKQTZMcs3FUitOBs5yDIVSK04GwAAAvwtBNkAOQZ/AA0AFQAhQBIQQAoNSBAVFQMKgAEPBl8GAgYAL10zGs05OS/EKzEwAyMmJwYHIzU2NyEWFhcnNjczFQYHI+micGNyYaJwZwE7NYccWVU18UOgmATZS1tlQRuClk6rH8JbbhVZdQAAAAL7DATZ/xkGfwANABUAJUAUEkAKDUgSQA8PCAoCgAIPDV8NAg0AL10zGsw5OTkvGs0rMTABNjchFhYXFSMmJwYHIzcjJic1MxYX/C9wZwE8MX4oomFyammiWJekQPI2UwT0gpZIpCwbQWVgRsN3VxVwWQAAAvwtBNn/3wb4ABIAIAAtQBkCBQULTxBfEAIQAAQBBAQWHIAUDxlfGQIZAC9dMxrMOTkvXcRdMjkvMzEwAxQHByMnNjY1NCYjIgc1NjMyFgMjJicGByM1NjchFhYXIX0Gfwo3QiUrIyUWRl5xyKJwY3JhonBnATs1hxwGYHIZPXQCHx0VHgp/Bkj+KUtbZUEbgpZOqx8AAAAC/DEE2f8bBx0ADQAlADdAIyITrxq/Gs8aAxpACQxIGh8XGgMOQAwRSA4DCYABDwZfBgIGAC9dMxrcOcYrFzIvK10zMzEwAyMmJwYHIzU2NyEWFhcDIi4CIyIGByM2NjMyHgIzMjY3MwYG5Y6NWlOVjapCARIwgDzkLFBHPxwsKA19C3NgMVFHPR4qKQp9DnIE2UdRSk4bpGBFhDsBPRshHCkvbn4cIRssLHN5AAAAAAL8MQTZ/wYGwQAHABUAMUAe3wIBAgdAERVIAAcwBwIHBxIVcA4BDoASDwtfCwILAC9dMxrNXTIROS9dK8xdMT
ABNjczFQYHIyUGBiMiJiczFhYzMjY3/TdGL91cc4MBzwvDoKW6CJYIc1hYcgkF+GlgFW5hTp60rKZXU15MAAAAAvwxBNn/BgbBAAcAFQAxQB7fBAEEAUARFUgAATABAgEBEhVwDgEOgBIPC18LAgsAL10zGs1dMhE5L10rzV0xMAEjJic1MxYXJQYGIyImJzMWFjMyNjf+AINqZd0vRgEGC8OgpboIlghzWFhyCQXdVXoVYGkznrSspldTXkwAAAAC/DEE2f8GBwYAEgAgAENAKwMGAwMLEL8FzwUCBUANEEgwBUAFAgAFEAUgBQMFBSBwGQEZgB0PFl8WAhYAL10zGs1dMjIvXXErXcQyOS8RMzEwARQGBwcjJzY2NTQjIgc1NjMyFhcGBiMiJiczFhYzMjY3/jEyNgZrCjMnOzUdFkZWZNULw6ClugiWCHNYWHIJBn80QRIpbgkYGSkIaAZDmJ60rKZXU15MAAAAAvwxBNn/FAcdAAwAJAA3QCMSIa8ZvxnPGQMZQAkMSBkWHhkDDUAKFUgNBQyAAw8JXwkCCQAvXTMa3TLGKxcyLytdMzMxMAEWFjMyNzMGBiMiJiclIi4CIyIGByM2NjMyHgIzMjY3MwYG/MsHamLFDpUJuKWjwQsCBC1PRz8cLCgNfQtzYDFRRz0eKikKfAtyBgI7RoGSl5+KLxshHCkvbn4cIRssLHB8AAAAAAEACv4UAaAAAAASABtACwMNCAAAExQQCxsDAC8/MxESATkRMzMzMTAXNCYnMx4CFRQGIyInNRYzMjbNTkazT0IjinBKUjw3Iy3jNG1CPEtQL2d/F7ISKAABABD+FAIpAQAADAAZQAoCCgcHDQ4FABsIAC8/MhESATkRMzMxMBMiJzUWMzI1ESERFAbJZFU7PHsBJ7f+FBnwE6oBTP6HscIAAAD//wAp/hQEeQW2AiYANwAAAQcAegGcAAAAC7YBBRYXAQAlASs1AAAA//8AL/4UAzcFTAImAFcAAAEHAHoBQgAAAAu2AQQcFggDJQErNQAAAAACAAT/7AS0BhQAGgAlAFxAMhQkDRgLAyQLJCYnCBkGABUNDg1lWRIODgAQAAsVABtdWQAAEAAgAAMJAwAPBiFdWQYWAD8rABg/X15dKwAYPz8SOS8zKxEAMxESOTkREgE5OREzETMzETMxMAEyEhEQAiMiJyMHIxEjNTM1IRUhFSEVFAczNhciAxUUFjMyNjUQAw7Q1ufHxXAVM+mcnAExATv+xQwMa3DVBmt0Xm8EXv7l/u7+6/7Qj3sErMehoccWQpym9P7VDLScraUBNQAAAwAKAAAF7gW2ABsAIwAsAKZAYxMUFCEECSQdHRsQKBchISgbCQQtLgAHEAcCEwMHBxsMExwkJBxrWYAkkCQCRiQB1iQBJCQbSSQkFElMJAEDrCQBBDokARkkARkkAQMPJAEJBiQkGwwsAQwBaVkMAxsdaVkbEgA/KwAYPysRADMREjkYL19eXV9dcV1fXV9dKytdcXErERIAORESORgvX15dERIBFzkRMxEzETMRMxEzETMRMzEwASMiBhUUFyMmNTQ2MyEgBBUUBgcVFhYVFAQjIQERMzI2NTQhJzMyNjU0JiMjAbI1QTwU8RnDuAH0ATcBGXtnjHv+3/j93QE1y4B6/vzBtX5xfISkBLhDLy41NUKntbHBgqkRCh6rjcjgAn3+g2JltvZOWlRJAAAA//8AuAAABL4FtgIGAasAAAACAKD/7AS0BhQAFAAfAENAIg4DEgsDHgseICESCQYACxUMD2BZDAAAFV1ZAA8GG11ZBhYAPysAGD8rABg/KwAYPxESOTkREgE5OREzETMRMzEwATISERACIyInIwcjESEVIRUUBzM2FyIDFRQWMzI2NRADDtDW58fFcBUz6QOm/YsMDGtw1QZrdF5vBF7+5f7u/uv+0I97BhTlmUKcpvT+1Qy0nK2lATUAAAAAAgCu/+wE7gW2AAsAFQBYQDQIDAwFABEFERYXCBVpWTAIoAgC4ggBA4EIAQRdCAEFFQgBAwhzCAIMAwgIFgYDAw5pWQMTAD8rABg/EjkvX15dXV9dX11fXXErERIBOTkRMxEzETMxMAEUBCEgEREhETMgBAUUMzI2NTQmIyME7v7l/vj94wE1tQEeATj89eB2e5C3igG+4PIBwgQI/c/s29VtaGteAAAAAgCa/+wEtgYUABEAHgAzQBkLBgAcBhwfIAsDDwcADxJdWQ8QAxldWQMWAD8rABg/KwAYPxESORESATk5ETMRMzEwARAAIyAAEREhERQHMzY2MzISJSIGFRUUFjMyNjU0JgS2/uvz/wD+7AExDQ01p2nG4P34eGttcGxrcAIv/u/+zgEmARED8f6WOKVSVP7MQJmSNaSao6uppwAAAQBI/+wEogXLABgAJkAUCRYWDwQDGRoGAGlZBgQME2lZDBMAPysAGD8rERIBFzkRMzEwASIGByc2MyAAERAAISImJxEWFjMyEjU0AgHpV51JZMr4ATkBX/6e/rpvuGCEmU2+x8cEyTon/Gf+dP6k/pn+cCMoAQQuHwED7OoBAgABAHf/7AWmBwgAIgBHQCcaJAgfAw0fEw0TIyQWHGtZDxZvFn8WAwkDFhERAGlZEQQKBWlZChMAPysAGD8rABgQxF9eXSsREgE5OREzETMRMxEzMTABIgIVECEyNxEGIyAAETQSJDMyFzY2MzIWFxUmIyIVFQcmJgMlr8ABb5rbtN7+wf6upgE30V9XAqGZLk4TOThkZFKmBMn++ev+F03+/EsBgwFq5AFXtxelrxUK6RRyUugnOgAAAQBc/+wEsgYfACEAOkAeDiMfExkCEwcCByIjCxBdWQsBBRddWQUQABtdWQAWAD8rABg/KwAYPysREgE5OREzETMRMxEzMTAFIBEQACEyFzU0NjMyFxUmIyIVFQcmJiMiERAzMjY3EQYGAmb99gEcAQk7K6GdTz45N2VaSHw+7u5YlktKlxQCPQEdAS0JVKy1H+kUc5viHSX+rv64LzL++y8kAP//AC8AAAV1BbYCBgCSAAAAAgAKAAAGbwW2ABQAGwBGQCUJDhkEABUVBA4DHB0ADBAMAhMDDAwEERgGEQZpWREDBBlpWQQSAD8rABg/KxEAMxESORgvX15dERIBFzkRMxEzETMxMAEQACEhESMiBhUUFyMmNTQ2MyEgAAEQISMRMyAGb/5l/nz+YjVBPBTxGcO4AfgBZgGM/r7+YKaGAcAC6f6X/oAEuEMvLjU1Qqe1/ob+pQHX/EgAAgBcAAAEYgW2AAwAFQBhQDkBEQgEDBUIFRYXCw5pWTALoAsC4gsBA4ELAQRdCwEFFQsBAwtzCwIMAwsLBQICAWlZAgMFFGlZBRIAPysAGD8rERIAORgvX15dXV9dX11fXXErERIBOTkRMzMRMzMxMAEhESERISAkNTQkITMVIyIGFRQWMzMDLf2aA5v+Pv7e/t4BPQEjcVC1kpKdaAS2AQD6St3k1u7+XGRmYwAAAAACAFz/7ARxBhQAFAAhAEVAJA0fAxAJGBgLA
wMiIxIIAAYRFQ4NYFkOAAYcXVkGDwAVXVkAFgA/KwAYPysAGD8rABg/ERI5ORESARc5ETMzETMzMTAFIgIREBIzMhczJjU1ITUhESMnIwYnMjY3NTQmIyIGFRQWAgLF4erE1mwKF/2MA6bqOw1oanVtBXF7ZnFyFAEyAQ8BCQEopIBgleX57JGl84ijIamSopulpQAAAAIAXP/sBJgGHwAdACgAQ0AiAA8IHhUCDxsjIw8VAykqEg8VACACHhgmXVkYAQUMX1kFFgA/KwAYPysREgA5ORESORESARc5ETMRMxEzMxEzMTABFhUUBiMiJic3FhYzMjY1NCYnJiY1EAAhMgQVFAYBFBc2NjU0JiMiBgMO7vHWbdCQeV2sWEpJioq5rAEaAQn0ASXA/bvJd4t6Z3B6AnWYvpCjLUHXLTc2LjZpR132oAEDARL407bvAXDCax+2gWZ7iAABAHkAAAPDBbYACwBwQEUHCwoFAQEDCwMMDQQDaVlGBAHWBAESBAEDIQQBsQQBBEwEAaMEAQQeDEkZBAEDDwSPBAIJBgQECwgIB2lZCAMLAGlZCxIAPysAGD8rERIAORgvX15dX10rXV1fXXFfcV1xKxESARc5ETMzETMxMBMhESE1IREhNSERIXkCFP4RAe/97ANK/LYBAAF5/gFB/vpKAAAA//8ApP/sBhIFzQIGAt4AAAABAFj/7ATRBcsAJQB6QEkkIyMMFSAMAAYbGxEAIAQmJyQSDw8Sa1kxDwEEGA8BRQ8BxQ8BVg8BDyQNSSoPqg8CAw8Pnw8CCgUPDx0DAwlrWQMEHRdrWR0TAD8rABg/KxESADkYL19eXV9dK11dcXFfcSsREgA5ERIBFzkRMxEzETMRMxEzMTATNCQzMgQXByYjIgYVFBYzMxUjIgYVFCEyNjcRBiEgJDU0Njc1JIUBLPWaAQaLh8DOhYXT6nqJ5u8BcWr/YMP+tv7d/sPaxP6PBEyv0EdZz3dZS2Nb8mBp1zIu/vxP5syWthIGNwAAAAAB/9v+FAP+BbYAEwBWQDIFEAAACw4SCwMUFRATaVnIEAFZEAEMEAENEB4MSQ8QAQ8DEBAUDAwPaVkMAwMIa1kDIwA/KwAYPysREgA5GC9fXl0rXl1dXSsREgEXOREzETMyMTAFFAYjIic1FjMyNjURIRUhESEVIQHpuLJYTDM+MDwDRv3rAfD+EIOzth/qFT0+BjP+/of9AAEAd//sBfwHCAAmAGRAOBQoJiQfCAIkGQ0NJAgDJygQFmtZDxBvEH8QAwkDEAsAJmlZDQABCwQAAAULCxxpWQsEBSJpWQUTAD8rABg/KxESADkYL19eXSsAGBDEX15dKxESARc5ETMRMxEzETMRMzEwASERBgYjIAAREAAhMhc2NjMyFhcVJiMiFRUHJiMiAhUUFjMyNxEhAuMCRI35gv61/qMBlQFnZ1cCpJYuThM5OGRpoK3J8sO6YWT+6wM1/QouJQGFAWwBYgGME6SsFQrpFHJE5VD+8uTu+xQBMQAAAgAA/hQFMwW2ABYAIAA4QBwBIg4dCgINBBcXGg0KBCEiGhMHAA4DBx9uWQcjAD8rABg/MxI5ORESARc5ETMRMxEzMhEzMTABIQESFRQGIyImNTQ2NwEhExYWFzY2NwM0JicGBhUUMzID9AE//fKNn4aGmks8/gwBP/gUNgwTPBIZJSElIEVGBbb7Rv7mqYWgnItm24QEtv2DMLc1S6co/AIujT9FhC1yAAAAAAEAoP/sB0wGFAAiAEVAJBwZFRUWAA4JBgYOFgMjJBwWHxcABw8WFR8RXVkfEAsDXVkLFgA/KwAYPysAGD8/PxESORESARc5ETMRMxEzETMzMTABFBYzMjY1ESERECEgJjU1NCMiBhURIREhERQHBzM2MzIWFQSTX2RlYAEx/gr+9+usd27+zwExBwcQa9K7yAHniH6ImwJa/Yv+A97w0/Kwwf3yBhT+wyWJWqTYzAAAAQCu/+wDJQW2AAwAIUAPBwEBCwsNDgwDCQRpWQkTAD8rABg/ERIBOREzETMxMAERFBYzMjcVBiMgEREB8kk8TWFtm/6RBbb7z0hNIvMzAX4ETAAAAQA3AAAC5QW2ABMAj0BUBgoODgwTAw8PEQgMDAERERQVCxMAE2lZCF0AAUwAARgAAb8A3wACKgABqgABbgABTAABAB4MSRkAAQMPAAEJBgAADwQGAwQDblkEAw0QDxBuWQ8SAD8rEQAzGD8rEQAzERI5GC9fXl1fXStdXV1xXXFxcTMrEQAzERIBOREzMxEzETMRMzMRMxEzMzEwEzMRJzUhFQcRMxUjERcVITU3ESM3vbICmbK8vLL9Z7K9A1oBWlKwsFL+pv7+plKwsFIBWgAAAAABALgAAAVQBc0AFgBAQCARAAAYCAQEBRYCBQIXGAIWCAMDAwUGAwEFEg4TaVkOBAA/KwAYPzM/EjkRFzMREgE5OREzETMRMxEzETMxMCEhAQcRIREhETc3PgIzMhcVJiMiBwMFUP6g/oGD/soBNoOmXnF3RVhBLjRVQvYCaF799gW2/V673X5rOB/0E1j+ugABAKAAAAT2BiMAFwBAQCADBQUZEQcXCgQHCgcYGQcEAAMICAoCDwYKFQ4TXVkOAQA/KwAYPzM/EjkRFzMREgE5OREzETMRMxEzETMxMAE3ASEBASEBBxEhETQ2MzIXFSYjIhURBwHFhQE5AVj+RAHX/qD+voP+z7SyVEw5OGQQAmCqAVT+G/2HAcVp/qQEw6y0H+kUc/6i/gAAAAABABQAAAKPBhQACwBCQCMCBAcFAAQECQUFDA0DBwgHYFkAuQgBqAgBKQgBCAgFCgAFFQA/PxI5L11dXTMrEQAzERIBOREzMxEzETMRMzEwATMVIxEhESM1MxEhAemmpv7PpKQBMQNc5f2JAnflArgAAQAI/+wE4QYhACcAd0BGAAoTARIQGhABAgUECgcoKRAFEQQTAhIDEhEEAwADEAMwA0ADgAOgA7ADwAMICQMRAxEDAQEjIw0AFQ0IYVkNAR0XYVkdFgA/KwAYPysAGD8SOREzETk5Ly9fXl0RMxEzERI5ORESOTkREgEXOREzETMRMzEwMwEnByc3JiYjIgc1NjMyFhc3FwcBFhYzMjcVBgYjIiYnAiYnIwYHAwgB2RbNM6ofSC8yNE9XhrQ/vjOqARUlTDchJBdyJ26MKXIrDQYuHs4EITk9qDMZEA38EWJtO6oz/QZoYgrsDBJsdwFDhTSaTP4bAAABAK7/7AePBbYAIAA9QB8bGAIgCgwMByAYBCEiDRMLCAAZAwsSBB0VHWlZEBUTAD8zKxEAMxg/PzMzEjk5ERIBFzkRMxEzETMxMAEhERQzMjY1ESERIycjBgYjIicjBiMiJjURIREUMzI2NQODATWye3MBN+krEzK0cuxqDnHty9UBNbN8cQW2/C/1q8sDUPpKnFVbuLjUzQQp/C/1qKwAAf/b/hQFyQW2ABkANkAaCwMGBhEAFxURFRobFAIBGBIDARIJDmtZCSMAPysAGD8/MxI5ORESATk5ETMz
ETMRMzIxMCEhASMSFREUBiMiJzUWMzI2NREhATMCNREhBcn+dv2ECRO0qEpMMz4wPAGHAnsHDwEXBFL+2338za+6H+oVPT4GM/u5AR12ArQAAAD//wCg/hQEqARzAgYBhAAA//8Ad//sBecFzQIGAu4AAAACAHf/7AheBc0AGgAjAEVAIxkAGwYAIBESEiAGAyQlGQsLCRISFiIJImlZDQkEAx5pWQMTAD8rABg/MysRADMYPxI5ETMREgEXOREzETMRMxEzMTABEAAhIAAREAAhIBc2MzIWFREhETQmIyIGBxYFFBYzIBEQISAFvv6l/rj+t/6lAV0BSQE7rqf90eP+y2xnSW0tS/v+sq0BXv6k/p8C3f6T/nwBhAFvAWsBg7m5+ej8FAPTen4qMKjs9/YB7QHuAAAAAgBc/hQG1QRzABkAJQBDQCIaBhggERISIAYDJicYCwsDCRIbFiMJI11ZDQkQAx1dWQMWAD8rABg/MysRADMYPxESOREzERIBFzkRMxEzETMxMAEQACMiABEQADMyFzYzMhYVESERNCYjIgcWBRQWMzI2NTQmIyIGBG/+6vfw/uoBFffxjIDjw8r+z1Zecjwt/SRkb25jZG9tZAIx/u3+zgE1ARABEwEvoKDTx/s7BHl5eUh2kKepqaempqUAAAAAAgAKAAAFpAW2ABYAHwBeQDQLEBcFBQYAGxsGEAMgIQAOEA4CEwMODgYTBBdpWVAEAQ8EHwQCCQMEBBMGEh8IEwhpWRMDAD8rEQAzGD8SOS9fXl1dKxESADkYL19eXRESARc5ETMRMxEzETMxMAEUBCEjESERIyIGFRQXIyY1NDYzISAEATMyNjU0JiMjBaT+2f7whv7LNUE8FPEZw7gCAAEKARX9Q2eNj3Z/jgPu7Pr9+AS4Qy8uNTVCp7Xl/jVwbW1oAAIAoP4UBLQGHwAfACsASUAmGiQCDwMSEhMJKRMpLC0OAwwGExsXHF1ZFwEGIF1ZBhAMJ11ZDBYAPysAGD8rABg/KwAYPxESOTkREgE5OREzETMRFzMzMTABFAczNjYzMhIREAIjIicjFxcRIRE0NjMyFxUmIyIGBxMiBgcVFBYzMhE0JgHRDAwyomnG4N/H1WgOBwf+z7OzVEw5ODAxA9txaAJrdM1lBKpbglFV/sv+8/7v/syJPl7+Owaqq7Yf6RQ1Pv7Hi6AhtJwBUqWlAAACALj/MwVIBbYADwAYAFZAKw0aDBQFEAEBAgkUAhQZGg4CBRhpWQAFAQ4DBQUCAwwAABBpWQAAAgMDAhIAPz8SOS8rEQAzERI5GC9fXl0rABgQxhESATk5ETMRMxEzMxEzETMxMAERIREhFTMgBBUUBgcBIQEnMzI2NTQmIyMB7v7KATZ0ASoBHomHAa7+qP6jpWSTjI+WXgFk/pwFts3Z3X7HPv2DAjH8YmloWQAAAAEAVv/sBBAFywAkADxAHw4AIBMaABMHAAclJgcTABoEIhAQCmlZEAQiHWlZIhMAPysAGD8rERIAFzkREgE5OREzETMRMxEzMTATNDY3PgI1NCYjIgYHJzYzMhYVFAYHDgIVFBYzMjcRBiEiJFamvopuL19bU6ZlXOPv2vejy31qNmtqzOK8/vzw/v4Bi5fWUjlDRzBDSS0p8WfTwZnQUzNESi9LVF7+/lzTAAABAE7/7AOoBHMAIgA8QB8NAB4SGQASBgAGIyQGEgAZBCAPDwlgWQ8QIBtfWSAWAD8rABg/KxESABc5ERIBOTkRMxEzETMRMzEwEzQ2NzY2NTQmIyIGByc2MzIWFRQGBw4CFRQzMjcVBiMiJk6Oq5xWTUVLklJSwd/E3oeyYWcstLupotrk5QE3f6tBOzomKSsuJNxYoJGAnkUmNi4kZljoT6kA//8ATgAABHkFtgIGAXAAAAAC/4f+FANYBikAFQAgAEVAJhQiGQgIDh0DAyEiBRyDWQAFEAVQBQMJAwULCxaDWQsBABFdWQAbAD8rABg/KwAYEMRfXl0rERIBOREzMzIRMxEzMTABIiY1ESMiJjU0NjMgEREUFjMyNxUGASIGFRQWMzM1NCYCUr20N4yXpYgBXko8UHBt/Z4iKDk4MTD+FLm6BLaDeW2D/pH6zUE+I+M0B20rHyY2QDQyAAAAAQAv/hQDNwVMACAAUkApFh4CGA8NFBgYDR4IDQghIhIUQA4XERQUF2BZFA8KG11ZChYABV9ZABsAPysAGD8rABg/KxEAMxEzGhgQzRESATk5ETMRMxEzETMRMxEzMTABIic1FjMyNjU1IyImNREjNTc3MxUhFSERFBYzMjcRFAYCFFZNLzM4Lhq3p5KoWMMBOf7HSTxQcJT+FBvVEjk9hLm5AhuBZuzu5f3lQT4j/mawpAAAAQAKAAAEyQW2ABEAOUAeBgsAARABCwMSEwAJEAkCEwMJCQ4BEhEDDgNpWQ4DAD8rEQAzGD8SOS9fXl0REgEXOREzETMxMCEhESMiBhUUFyMmNTQ2MyERIQM7/suJQTwU8RnDuANE/nIEtEMvKjU1Qqe1/v4AAAAAAQAv/+wDNwYfAB8ARkAkEx0KCBkdHQwIGwMIAyAhEBVdWRABCRwZHGBZDBkPBQBdWQUWAD8rABg/MysRADMYPysREgE5OREzETMzETMRMxEzMTAlMjcVBiMiJjURIzU3NTQ2MzIXFSYjIgYVFSEVIREUFgJ3UHByprenkpLAw2VXS08+NgE5/sdJ3yPjM7m5AhuBWGystSHqFzo5WuX95UE+AAABACn+FAR5BbYAEQAvQBgGAAALEAsNAxITEQ0ODWlZDgMJA2tZCSMAPysAGD8rEQAzERIBFzkRMxEzMTAFFBYzMjcVBgYjIBERIREhESEC7DwwPjMZXzT+mf5zBFD+c30/PBXqCxQBaQU3AQL+/gAAAQA3/+wGEgW2ACEAREAkCQ0ZHxUcGAoGDQMDBhgVBCIjChwGAxkaGWlZBxoDEQBpWRETAD8rABg/MysRABczERIBFzkRMxEzETMRMzMRMzEwJTI2NTQmJxEhESEWEhUUAgQjIiQCNTQSNyERIREGAhUUFgMltMWBigJ//o6Wp6r+w9HT/sWqppv+iQKDiIbD7tjJyfxMARb+/Fz+vsa3/uqVlQEXuMYBPl4BBP7qR/79ycbZAAAAAAEAAAAABPgFywAbAChAEwITGQ4OHB0HAQIDFhFpWRYEARIAPz8rABg/EjkREgE5ETMzMjEwISEBIQEWFhc2NjcTNjY1NCYjIgc1NjMyFhUUBwNI/sL99gE5AScONwkGPxGPKxU2JjYiSGKSnUwFtvyaJ+E6NN4mAVhlTB4wLhPwI52Kd8kAAAABAAAAAAT+BcsAEwAuQBcGFREICQkUFQQKBwMJBQMJEgAPaVkABAA/KwAYPz8SFzkREgE5ETMyETMxMBMyFhcTASEBESERAy4CIyIHNTagZ4xAsAEtAU7+G/7M4Sk0MSooJEwFy2uJ/okCVvyD/ccCLQHZVU4iE/AjAAAAAQAU/hQE4QRzACAAMEAZDh8YIAQhIgMfHxYgDwoQYVkKEBYcXVk
WGwA/KwAYPysAGD8SOREzERIBFzkxMAETFhczNjcTNjYzMhYXFSYjIgYHAQIhIic1FhYzMjc3AQFgsC4UBiQibjGWaixeGiYfNksn/tmE/qVFTwxIIapDFf5DBF79+IF/lV8BL4V1EwzsC11u/NP+mRHyAwrDOwRWAAEAMQAABHEFtgARAIpAVwMOBg0NCQcCCxARDggSEwoRABFpWQcPAH8AAl0AAUwAARgAAb8A3wACKgABqgABbgABTAABAB4MSRkAAQMPAAEJBgAADgYDBAQDaVkEAw8LDg4LaVkOEgA/KxESADkYPysREgA5EjkYL19eXV9dK11dXXFdcXFxcTMrEQAzERIBFzkRMxEzMTATIRMhESEVATMVIQMhESE1ASN/AXf4/VYEGv7hv/6P6wLP+8ABFccDUgFkAQDI/mT+/qz/AMkBiwABADcAAAOqBF4AEQB2QEIMDgMQCwAADggBEAQOBwQHEhMPBAUEZlkMZwUBPAVMBcwF3AUEAw8FnwUCDQUFBQELCAkJCF5ZCQ8CEAEBEF5ZARUAPysREgA5GD8rERIAORI5GC9fXl1fXV0zKxEAMxESATk5ETMRMzMzETMRMxEzETMxMCEhNRMjNSE3ITUhFQczFSEHIQOq/I3XoQEznf4ZA0K5nP7TrgIKtAEjx9fpxvrH7v//ADn/7ARqBbYCBgLmAAAAAQBQ/+wEgQW2ABkAREAkExgEFAQQCxcAEAQaGxMBbFkZExMNFBgVFRhpWRUDDQdrWQ0TAD8rABg/KxESADkSORgvMysREgEXOREzMxEzMzEwASMiBhUUFjMyNjcRBiEgJDU0NjcBNSERIQEDoHvQ0KWnbOlbwP7z/uD+vPz2/k8Dv/3lAWsCgWFobWo2K/75T+TQxdwTAZzG/wD+pAAAAAEAUP4UBFgEXgAZAFNAKxgTFwsLABQEEAATEBMaGxkAX1kTDxkBDwMZGQ0UGBUVGF5ZFQ8NB11ZDRsAPysAGD8rERIAORI5GC9fXl0zKxESATk5ETMRMzMRMxEzETMxMAEjIgYVFBYzMjY3EQYhIAA1NBI3ATUhFSEBA4t3x8iil2TbW8X/AP7u/s/57v5QA8f9sgGLAR2CiX+JMy3+/lQBBefWAQIiAZ7G6f6BAAAAAQBi/hQELQReACMAaEA3CwYAChkOBBQHFB8OAB8AJCURIYRZDBEcEQINAxERHAULCwRfWQsLJAoHCAgHXlkIDxwWYFkcGwA/KwAYPysREgA5EjkYLysRADMSORgvX15dKxESATk5ETMRMzMRMxEzMxEzMzEwATQmIyM1ASE1IRUBFhYVFAQFBgYVFDMyNxUGBiMiJjUQJTY2AviCrYwBNP4GA4v+lsjN/vX+6lhOzbzkWe6AxOYBjJlxASU/NMwBEenE/tkQr5W0shUHKCNcUOIkLKaaAS4eCzgAAAEANwAABDUGHwAfAGJANB0bBwINHwQEGxQFGw0NCAUDICEACAkIhlkdDwkvCQILAwkJGAUYEGtZGAEGBQIFAmxZBRIAPysREgA5GD8rERIAORgvX15dMysRADMREgEXOREzETMRMxEzETMzETMxMAEBFSEVITUBITUhNjY1NCYjIgYHJz4CMzIEFRQHMxUDGf6FApf8AgGM/pcCDjUoaldNiliWXoWgY9IBAVCNApP+ZQ/p4wGwx0GFSldqQla7Vksw6rqdhMcAAQA5/+wEUAW2ABoARUAjGAMVCQkaEwMQExAbHBoTaVkaGgYWGRUWFWlZFgMGDWtZBhMAPysAGD8rEQAzERI5GC8rERIBOTkRMxEzMxEzETMxMAEyBBUUBCEiJicRFhYzMjY1NCEhESMRIREhEQJQ6QEX/rj+z3beSkzkZKWn/tP++6ED3/30A6b41fH8KCcBCyg3cmzfAhABAv7+/vIAAAEATv/sBCMEXgAZAE1AKBcDFAkJGRIDDxIPGhsZEntZDRkBDQQZGQYVGBQVFGBZFQ8GDV5ZBhYAPysAGD8rEQAzERI5GC9fXl0rERIBOTkRMxEzMxEzETMxMAEyFhUUBCEiJic1FhYzIDU0ISMRIzUhFSEVAkTt8v7W/uR22EFH4loBKf7Pw6wDov4pArKtpba+Jh/8JDKFgQGc5eXHAAABAEr/7AOsBUwAJABIQCMjBxwNDRohAAAaBxQaFCUmHyFAHiEbJCEkYFkhDwoRX1kKFgA/KwAYPysRADMRMxoYEM0REgE5OREzETMRMxEzETMRMzEwARQWFxcWFhUUBiMiJic1FhYzMjY1NCYnJiY1NSM1NzczFSEVIQIrJjZtX1n253LTQEzLU25hLDOta8LGWMMBG/7lAzE4OB8/NotgpLImH/wpOTc3IjMbWaOCUoFm7O7lAAAAAgCg/hQEtARzAA4AGAA6QB0EEwAAAQsWARYZGgUOBw4TYVkOAg8BGwcPXVkHEAA/KwAYPz8vKxESADkREgE5OREzETMRMzMxMAEhETMXMzYzMhYWFRAABRMiBhURNjY1NCYB0f7P+CsOa9J7wmn+ff6g23hjw+lv/hQGSpGmgOuZ/uv+picDppqX/o8Y3KJ8kAAAAAEBov4UAn0GFAADABZACQIDAwQFAAADIwA/PxESATkRMzEwATMRIwGi29sGFPgA//8Am/4UA4cGFAAnA7v++QAAAAcDuwEKAAAAAAABAGb+FAO6BhQAEwBiQDUFAQoOEhIHAxMMEBATAQMUFQ0FBgWGWQoGsAYBDwYBDgMRAQ4CAQKGWUABAQYBBgETCAATIwA/PxI5OS8vXSsRADMRM19eXV0RMysRADMREgEXOREzETMzMxEzMxEzMTAlITUhNSE1IREzESEVIRUhFSERIwGi/sQBPP7EATzbAT3+wwE9/sPb+MbmxgKq/VbG5sb9HAAAAP//AHX/5QHTBbYCBgAEAAD//wC4AAAKSAdzACYAJwAAACcAPQXXAAABBwFMBccBUgAbQBIDHwUmAwsiGxUWJQIxEhkAKiUrNSs1ACs1AAAA//8AuAAACaIGIQAmACcAAAAnAF0F7AAAAQcBTAWDAAAAFEAOAwwiGxUWJQI4EhkAKiUrNSs1AAD//wBc/+wIxwYhACYARwAAACcAXQUQAAABBwFMBKgAAAAUQA4DDTIrJSYlAkwiKQ46JSs1KzUAAP//ALj+UgZzBbYAJgAvAAABBwAtBIUAAAALtgEjDhEFFSUBKzUAAAD//wC4/hQGZAYUACYALwAAAQcATQSFAAAADbcCASMOEQUeJQErNTUA//8AoP4UBFAGFAAmAE8AAAEHAE0CcQAAAA23AgFQDA8AHCUBKzU1AP//ALj+UghvBbYAJgAxAAABBwAtBoEAAAALtgFcGBsAHyUBKzUAAAD//wC4/hQIYAYUACYAMQAAAQcATQaBAAAADbcCAVwYGwAoJQErNTUA//8AoP4UByEGFAAmAFEAAAEHAE0FQgAAAA23AgFNHSAALSUBKzU1AP//AAAAAAWFB44CJgAkAAABBwFMAFYBbQATQAsCABYPBQYlAhMFJgArNQErNQAAAP//AFb/7AQ7Bi
ECJgBEAAABBgFMCgAADrkAAv/6tCskEhclASs1AAD////cAAADQQeOAiYALAAAAQcBTP8iAW0AE0ALAREFJgEAFQ0GCyUBKzUAKzUAAAD///+GAAAC6wYhAiYA8wAAAQcBTP7MAAAAC7YBAA0FAgMlASs1AAAA//8Ad//sBecHjgImADIAAAEHAUwAwwFtABNACwIbBSYCAB8XBgAlASs1ACs1AAAA//8AXP/sBJgGIQImAFIAAAEGAUwMAAAOuQAC//+0IxsTDCUBKzUAAP//AK7/7AVeB44CJgA4AAABBwFMAJoBbQATQAsBGAUmAQAcFAkBJQErNQArNQAAAP//AJr/7ASiBiECJgBYAAABBgFMMwAAC7YBAR4WCRQlASs1AP//AK7/7AVeCCkCJgA4AAABBwlMAwYBUgAbQA8DAgEsBSYDAgEBFhUJASUBKzU1NQArNTU1AAAA//8Amv/sBKIG1wImAFgAAAEHCUwCoAAAABBACQMCAQMYFwkUJQErNTU1AAD//wCu/+wFXgheAiYAOAAAAQcIiAMGAVIAG0APAwIBMQUmAwIBGxwYCQElASs1NTUAKzU1NQAAAP//AJr/7ASiBwwCJgBYAAABBwiIAp4AAAAQQAkDAgEbHhoJFCUBKzU1NQAA//8Arv/sBV4IYAImADgAAAEHCUsDBgFSABtADwMCASgFJgMCAQA5MgkBJQErNTU1ACs1NTUAAAD//wCa/+wEogcOAiYAWAAAAQcJSwKgAAAAEEAJAwIBAjs0CRQlASs1NTUAAP//AK7/7AVeCF4CJgA4AAABBwiJAwYBUgAeQAkDAgExBSYDAgG4/+W0Fy4JASUBKzU1NQArNTU1//8Amv/sBKIHDAImAFgAAAEHCIkCngAAABKyAwIBuP/ltBkwCRQlASs1NTUAAgBY/+wEXgRzAAYAGwBVQDIYBBAKEhIDEAMcHREEZlkPER8RAs8R3xECGREBAw8RAQsGERENBwcVYFkHEA0AX1kNFgA/KwAYPysREgA5GC9fXl1fXV1xKxESARc5ETMRMzMxMCUyNjchFhYTIAAREAAjIgA1NSEmJiMiBgc1NjYCTFl1Cf5UAm85ARABLf7q+uz+9gLRBZCCX7JpVb/FcXpwewOu/tP+8f7o/s0BC/CUgpImMuwsJAAAAP//AAAAAAWFCCkCJgAkAAABBwlMAsMBUgAbQA8EAwIAEiQFBiUEAwInBSYAKzU1NQErNTU1AAAA//8AVv/sBDsG1wImAEQAAAEHCUwCdwAAABKyBAMCuP/7tCc5EhclASs1NTX//wAAAAAFhQgpAiYAJAAAAQcJTwLDAAAAF0ANAwIDEhgFBiUDAhsFJgArNTUBKzU1AAAA//8AVv/sBDsG1wImAEQAAAEHCU4CdwAAABCxAwK4//y0JiUSFyUBKzU1AAD//wAAAAAHJQb+AiYAiAAAAQcBTQHlAVIAFrkAAv95QAkXFgYHJQIXBSYAKzUBKzX//wBW/+wG/gWsAiYAqAAAAQcBTQE9AAAADrkAA//NtDw7FB8lASs1AAEAd//sBawFywAhAJ9AXhEEBh0hGxcMAgYGHxsMGyIjBR0PHT8dAgwGHh1tWQIfHgEeHgkAGSEBCSGZIQINAw8hARUGACGBWQ8AHwCvAL8ABAkDAEANEUgAAAkPDxRpWQ8EABkBDAYJGWxZCRMAPysAX15dGD8rERIAORgvK19eXSsAX15dX15dcRESORgvcTMrAF9eXREzERIBOTkRMzMRMxEzETMzETMzMTABIREzFSMRBgYjIAAREAAhMhcHJiMiAhUQITI3NSE1ITUhAuMCRIWFjfmC/rX+owGVAWfh0WegrcnyAYdjWP8AAQD+6wNe/rDG/vcuJQGFAWwBYgGMWvhQ/vLk/gIVWsZjAAAAAgBc/hQE7ARzACAALQB7QEULKwQrFR4bJCAAACQPDQoVBi4vGg8YEgALDAuDWR4MDAIQGBISIWBZHxIBAw0SAQ0EEkAJEkgSHA8YKF1ZGBACB2JZAhsAPysAGD8rABg/LytfXl1fXSsREgA5EjkYLzMrEQAzERI5ORESARc5ETMRMzMRMzMRMzEwBQIhIic1FjMyNjchNSE1NyMGIyICNRASMzIXMzchETMVATI2NTU0JiMiBhUUFgRYWv5E9a3P5UhtFP7VAU0JCWzRy9vjy852CBkBAnv9g31obnxrbGu6/s5C6VYtMJ1xiaYBFv0BBAElpI/7hZ0B132PGaWUlJ2ckf//AHf/7AUnB3MCJgAqAAABBwFMAKgBUgATQAsBIAUmAUUkHAgCJQErNQArNQAAAP//AFz+FARxBiECJgBKAAABBgFM3gAADrkAAv/ktDAoHiYlASs1AAD//wC4AAAFUAdzAiYALgAAAQcBTABcAVIAFbQBEgUmAbj/xbQWDgYAJQErNQArNQD//wCgAAAE9gecAiYATgAAAQcBTABGAXsAFbQBFAImAbj/6LQYEAsFJQErNQArNQD//wB3/hQF5wXNAiYAMgAAAQcBUQJgAAAAC7YCBiEcBgAlASs1AAAA//8AXP4UBJgEcwImAFIAAAEHAVEBlgAAAA65AAL/8bQlIBMMJQErNf//AHf+FAXnBxkCJgAyAAAAJwFRAmAAAAEHAU0AwwFtABtAEgMrBSYCBiEcBgAlAwIrKgYAJSs1KzUAKzUAAAD//wBc/hQEmAWsAiYAUgAAACYBTQwAAQcBUQGWAAAAF7kAA//xQAwpJBMMJQIAHRwTDCUrNSs1AP//ADn/7ARqB3ECJgLmAAABBwFM/9QBUAAWuQAB//dACSIaAwQlAR8FJgArNQErNf//ADn+FARWBiECJgLnAAABBgFMzgAADrkAAf/1tCMcAwQlASs1AAD//wC4AAAKSAW2ACYAJwAAAQcAPQXXAAAAC7YCMRIZABslASs1AAAA//8AuAAACZYFtgAmACcAAAEHAF0F7AAAAAu2AjgSGQAbJQErNQAAAP//AFz/7Ai6BhQAJgBHAAABBwBdBRAAAAALtgJMIiAOKyUBKzUAAAD//wB3/+wFJwdzAiYAKgAAAQcAdgEzAVIAE0ALASMFJgHQHCAIAiUBKzUAKzUAAAD//wBc/hQEcQYhAiYASgAAAQYAdjsAAAu2AkEoLB4mJQErNQAAAQC4/+wHtAW2ABgAgEBNFREREgAWDgkGBg4SAxkaBw8AFQEMBhUQaVlGFQHWFQESFQEDIRUBsRUBBKMVAUwVATsVARkVAQMPFY8VAgkGFRUSFxMDEhILA2tZCxMAPysAGD8/MxI5L19eXV9dXV1dX11xX3FdcSsAX15dGD8REgEXOREzETMzETMRMzEwARQWMzI2NREhERAhICY1NSERIREhESERIQUEV2ZmWAE1/g3+/vD+H/7KATYB4QE1Ad+Edn2UAmj9lP364vW0/YkFtv3DAj0AAAAAAgC4/hQFAgXNAA8AGQA6QB0EFAAAAQsXARcaGwUIDw8UalkPAgMBIwgQaVkIBAA/KwAYPz8vKxESADkREgE5OREzETMRMzMxMAEhETMXM
[base64-encoded binary payload omitted: appears to be embedded font glyph data carried inside the minified chunk; not human-readable]
cCqwKAgkKAVkKAUgKAQoKBgM/EAEQEAEGBU4TCQkGSQ4BTgA/Mz8zETM/ERI5L10zETkvXV1xXV1xX3EzERIBFzkRMxEzMxEzMTABITUhByMBIRUhFSEVIREhJSERIwVE/YP+mWzmAd4DWP5mAX/+gQGa/HABE10CtPLyA/iw37D++fQBnAAAAAMApAK0A7gGrAAPABgAIABgQDgHCAgeEBoaDwQUCx4eFA8DISIHGRnWEAFOEAEEOhABxhABECQSE0gIEAFIEFgQAhAQDxgASRoPTgA/Mz8yETkvXXErXXFfcV0zEjkREgEXOREzETMRMxEzETMRMzEwEyEyFhUUBgcVFhYVFAYjIRMzMjY1NCYjIxERMzI2NTQjpAFK4s5WTmRaz7j+c+GDW1NZYXeUXVm/Bqx5iVd2DgYUd2OInwJnNj47Mv5z/vdERn8AAAAAAwASArQEDAasABMAGwAkAGRAOhMRHBUVAREIIAsZGSARAyUmCxQTEwgc1gABTgABBDoAAcYAAQAkEhNICAABSABYAAIAABEkAkkVEU4APzM/MxI5L11xK11xX3FdMzMzETMzERIBFzkRMxEzETMzETMRMzEwEzMRITIWFRQHMxUjFhUUBiMhESMhETMyNjU0IyczMjY1NCYjIxKSAUrizjqoizfPuP5zkgFzlF1Zv4uDW1NZYXcFGwGReYlZNqw3XYifAbv+90RGf6w2PjsyAAIApAK0BBcGrAAIAA8AIEAODQQACQQJEBEMBUkNBE4APzM/MxESATk5ETMRMzEwARQAISERISAABxAhIxEzIAQX/tT+5v7TAU4BBQEg6v7ReWABSAS6+/71A/j++PABSP1qAAABAKQCtAMIBqwACwBOQC4GCgoBBAAACAEDDA0JQQYBBDMGAcYGAZwGrAYCCQYBWQYBSAYBBgYBBQJJCgFOAD8zPzMSOS9dXXFdXXFfcTMREgEXOREzETMRMzEwASERIRUhFSEVIREhAwj9nAJk/n0Baf6XAYMCtAP4sN+w/vkAAAEAbQK0AtMGrAALAExALQcLCgUBAQMLAwwNA0EEAQQzBAHGBAGcBKwEAgkEAVkEAUgEAQQECwcISQALTgA/Mz8zEjkvXV1xXV1xX3EzERIBFzkRMzMRMzEwEyERITUhNSE1IREhbQGF/pUBa/57Amb9mgNmAQew37D8CAABAHECpgPbBroAGQA4QB0MAhkXEgcCFwcXGhsZPwB/AI8AAwAABA8KShUETwA/Mz8zEjkvXTMREgE5OREzETMRMxEzMTABIREGIyIAETQAITIXByYjIgYVFBYzMjc1IwI1AaavzO7+/wEnAQaplEx1fpOvkIdLRMsE8P3xOwELAQH2ARI9rDe8nqetDtMAAAABAKQCtAQMBqwACwBOQC0IBAQFAAkBBQEMDQNBCAEEMwgBxggBnAisCAIJCAFZCAFICAEICAUKBkkBBU4APzM/MxI5L11dcV1dcV9xMxESATk5ETMzETMRMzEwASMRIREjETMRIREzBAzh/lrh4QGm4QK0Abb+SgP4/nMBjQAAAAEAVAK0AjkGrAALADBAFggAAAoFAQEKAwMMDQkEBAZJCgMDAU4APzMRMz8zETMREgE5ETMzETMRMxEzMTABITU3ESc1IRUHERcCOf4bg4MB5YGBArR7OQKSN3t7N/1uOQAAAAAB/64BiQGFBqwADQAhQBECCwgIDg8FAAAQAEAAAwAJSQA/xF0yERIBOREzMjEwEyInNRYzMjY1ETMRFAYzTThFJktA4asBiRGyD1tcA7j8SrS5AAABAKQCtAP8BqwADAA2QBsIBAQFDAILAAACBQMNDgIMCAMDAwUKBkkBBU4APzM/MxI5ERczERIBFzkRMxEzETMRMzEwASEBBxEjETMRNwEzAQP8/wD+517h4VgBIfz+iwK0AaxB/pUD+P4vdwFa/kAAAQCkArQDNQasAAUAGkALAwAABQYHAUkDAE4APzI/ERIBOTkRMzEwExEzESEVpOEBsAK0A/j8urIAAAAAAQCkArQFFwasABMANEAZAgQFBQYNEhEOBg4UFQkBEgMGCwdJDgYATgA/MjI/MxIXORESATk5ETMzMxEzETMzMTABASMXFxEjESETMwEhESMRNDcjAQJm/v4GDQLJATP8BAEMATTTCgb+7QK0Ax3lX/4nA/j8+AMI/AgB4mnS/OMAAAAAAQCkArQEQgasABAAJkAREA4HCA4IEhELAggPCUkBCE4APzM/MxI5ORESATk5ETMRMzEwASEBIxcWFREjESEBMyY1ETMEQv7n/kEIBwjNARgBuwgMzwK0AvRreED+LwP4/RXUQgHVAAABAKQCtARiBqwADwAsQBQCBAQPCAwJDwkQEQUNDwAGSQkPTgA/Mz8zEjk5ERIBOTkRMzMRMxEzMTATMxEHBzMBIREjETQ3IwEhpMsGBQQB7AEOyA4G/hD+8gas/hWvaAMC/AgB6GW9/PYAAAACAHECpgRaBroACwAUACBADgwGABAGEBUWEglKDgNPAD8zPzMREgE5OREzETMxMAEUACMiADU0ADMyAAUQISARECEiBgRa/vzy8v7/AQH09AEA/QABCgEL/veFhwSw/P7yARD8+wEN/vH7/qoBVgFYrwAAAAIAbQKmA/YGrAAaACIAQUAgAx8OGxsMFBEAGAYfHxgRDAQjJAMOISEWFgkZEkkdCU8APzM/MxI5LzMSOTkREgEXOREzETMRMxEzETMRMzEwARQGBxYWFRQGIyImNTQ3JiY1NTMVFDMyNTUzARQzMjU0IyIDyUBQYF3r2tvpukxD37q44P2N29vb2wZCXIcqK4xnscC9scxTJ4BoamrBwWr9a8HBugAAAAIApAK0A4MGrAAHABEAKkATAA0NDggDDgMSEwAMDA8OTgcPSQA/Mz8SOS8zERIBOTkRMxEzETMxMAEzMjU0JiMjBRQGIyMRIxEhIAGFSs9WXGcB/tfHYOEBVAGLBM+ZS0mNpKz+lQP4AAAAAAIApAK0A/YGrAAIABUAPEAdExASBAAKCgsQBAsEFhcSAAAACQEJCQwUC04IDEkAPzM/MxI5L3EzEjkREgE5OREzETMRMxEzETMxMAEzMjY1NCYjIxERIxEhMhYVFAcBIQMBhUhwYWZvROEBNdrQwgE1/vr8BOlJRUg9/j3+ewP4lZq3Vf5DAYUAAAAAAQAnArQDTAasAAcAIEAPAAEGAQMDCAkBTgcDAwRJAD8zETM/ERIBFzkRMzEwASMRITUhFSECKeH+3wMl/t0CtANGsrIAAAAAAQCaAqYEBAasABEAIEAOCgcBEAcQEhMRCEkNBE8APzM/MxESATk5ETMRMzEwAREUBiMiJjURMxEUFjMyNjURBATq0Mvl4Wtqb2QGrP1vqsvHsgKN/ZN9aG92Am0AAAAAAQAlArQFsgasABsAIkAPChscHRcOCgkKGhNJAQlOAD8zPzMzERI5ORESATk5MTABIwMmJicGBwMjATMTFhc+AjcTMxMWFzY3EzMEpvyPByMFGBqN/P7024UnDAMHHgma0Z
ohEAophdsCtAIXGqoplVr96wP4/dWjZxEppR0COf3HlmZasAIrAAIAWgKmAzEFzQAYACIAM0AYEh0IGAwiCCIjJAIAGQwMFABODxRMHwVPAD8zPzM/EjkvMxE5ERIBOTkRMzMRMzMxMAEnIwYGIyImNTQ2Nzc1NCMiByc2MzIWFREDBwYGFRQzMjY1ApYsBjh8WnaGtLePf2OCSo6ppLDdWGFebU1dArRpRTKBdX10BgUidzmPRoiL/foBaQQEOz5aVkUAAAACAI0CpgNkBcsAFwAhAD9AIRIIGA0NFggcFhwiIwIXDA8YHxgvGAMYGBQXSx4FTA8UTwA/Mz8zPxI5L10zETkREgE5OREzETMRMxEzMTABFzM2NjMyFhUUBgcHFRQzMjcXBiMgERETNzY2NTQjIgYVASctBjt4WXaItLiOf2SCSpCq/q7dV19fa05cBbxoRzCAdH11BgQjdjmPRgESAgT+mgQCPUBYVUYAAAAAAgBoAqYDYgXLABAAHAAuQBYbAwwJFAMUHR4JDw0KSw1OGAZMEQBPAD8yPzM/PxI5ORESATk5ETMzETMxMAEiJjU0NjMyFzM3MxEjJyMGJzI2NTU0JiMiBhUUAZyQpKSWmVAGFbysKwhNTVxMUFxKUQKm1L282HFi/Phlc6ZoaxZ+bHR26QAAAAMAYgKmBT0FzQAnADIAOABjQDcTCSQ2HSgODh8JLCwfNR0EOToXAxoNDygfKC8oAygoNrMewx4CiB4BHh4aLyIiBgBMEDMzFRpPAD8zMxEzPzIyETMROS9dXTMzL10zETk5ERIBFzkRMxEzETMRMzMRMzEwATIWFzY2MzIWFRQGBwcVFDMyNxcGIyInBgYjIiY1NSEmJiMiBzU2NgE3NjY1NCYjIgYVATI3IRYWAbJkpTVIj3J2jq+xi3lke0iOpqZTMn1Ynr4CEQVoXo+IRogB81JbWTYuS1f+cZwO/sYCTAXNR0lTPYF1fXUGBDteN41GXC4svaNnW2U9pCAX/okEAj1AMiZVRP6lpExYAAAAAgCRAqYDiwbpABMAIAAsQBURDAMeDB4hIgoRBg1GDE4UAEwbBk8APzM/Mj8/ETk5ERIBOTkRMxEzMTABMhYVFAYjIiYnIwcjETMVBwczNhciBgcVFBYzMjY1NCYCWJCjpZRGbywRJargBgULTVJXRwFLVkZQUAXJ1bu91i81VgQ1+WQ2c6pnaBd6bXdyc3EAAgBoAqYDYgbuABIAHgAwQBcdAw4LCRYDFh8gCBEGDEYPThoGTBMATwA/Mj8zPz8ROTkREgE5OREzMzMRMzEwASImNTQ2MzIXMyY1NTMRIycjBicyNjc1NCYjIgYVFAGckKSklplQCBDfrCsITU1YTgJQXEpRAqbUvbzYcXAs+PvGZXOqYG8Wfmx3c+UAAAAAAgBoAqYDVgXLAAYAGgBKQCsZEQMSEgoRBAoEGxwSDQMB/QMB7AMBmQOpAwIPAx8DLwMDAwMHAA1MFQdPAD8zPzMSOS9dXV1dcTMREgE5OREzETMRMxEzMTABIgYHITQmAyImNTQ2MzIWFRUhFhYzMjY3FQYB7ENVBgE3UCvE3sq3r7799ANpXk+BRW4FNU9VTVf9cc6/wde8pWZaZh4fpDcAAAACAGICpgNgBcsABgAZADZAHRcEEAoSEgMQAxobBLMRwxECiBEBERENFQdMAA1PAD8zPzMSOS9dXTMREgEXOREzETMzMTABMjY3IRYWEzIWFRQGIyImNTUhJiYjIgc1NgHVRVsG/rgCVinM4M+6r8YCHQNtYZiHdgM9T1VOVgKO0LzD1rulZ1tlPaM4AAEAWAKmAyUFywAkAFlANBMSEgUFDyEVGgoKARUPBCUmEwICBCQB9CQB4iQBrCQBBJkkAQ8kHyQvJAMkJAweGEwHDE8APzM/MxI5L11dX11dXXEzEjkREgEXOREzETMRMxEzETMxMAEVIyIGFRQzMjcVBiMiJjU0Njc1JjU0NjMyFwcmJiMiBhUUFjMCg3tzYsOfjnnFxMlgZ5y4qrGPRjVzS1JQX2kEk5MwMFY7qjV0cU1cEgYkkGFqOpkVIiImLiYAAAABAEgCpgMSBcsAIgBZQDQQEREeCRkOAxQeHgMhGQQjJBAhIQQiAfQiAeIiAawiAQSZIgEPIh8iLyIDIiIXBgtMHBdPAD8zPzMSOS9dXV9dXV1xMxI5ERIBFzkRMxEzETMRMxEzMTABMjY1NCYjIgcnNjMyFhUUBxUWFhUUBiMiJzUWMzI1NCMjNQFOb3hMWoCAQZzCmrakZFbWu89qiY/Z51YEkyIuJScxkzpwXYUnBhdXRm+DMa47WlyTAAIAZgFeA2AFywALACYANEAaEQkeDCQDAxceAycoJBkbJUsHIUwAG08UD00APzM/Mz8zPxI5ORESARc5ETMzETMzMTABMjY3NTQmIyIVFBYFFAYjIic1FjMyNTU3IwYjIiY1NDYzMhczNzMB6VdMA1FZm0wBys3KsYCZpKwCAlCXk6GmlJZVBhO8A0xhcBh+bOp1dJqnrS2qO7AMY3PWu73XcWIAAgCJAYMBewW8AAkADQAuQBgMAAANBQUODwhQAgFgAnACAgINgApLDU4APz8aEMxdcTIREgE5ETMzETMxMAEUIyImNTQ2MzIDMxEjAXt5PD1AOXno3t4B7Gk3MjI2A2j8+AAAAAEAkQK0A7oG7gAOADVAGw4KBAcDBQUHCgMPEAcEAAMICAoCC0YCSwYKTgA/Mz8/ERI5ERczERIBFzkRMxEzETMxMAE3NzMBASEDBxUjETMRBwFmYeX8/roBWP8A62De3gsEWnfr/rH+RwE8SvIEOv4esgAAAQCRArQFZgXLACEAOUAcDAkAARobGwEJAyIjEwwJEApLGwEJTh4EBBYQTAA/MzMRMz8zMz8REjk5ERIBFzkRMxEzETMxMAEjETQjIgYVESMRMxczNjYzMhczNjYzMhYVESMRNCMiBhUDat17VE/eqh8KJoFHt0EUIH9Ri43de1lLArQBx6h2iv6RAwhiPDVxNTyGl/4GAceoeXEAAQCRAV4DgQXLAB0AMEAXAhMTEBsIEAgeHxMQFxFLEE4LF0wFAE0APzI/Mz8/ERI5ERIBOTkRMxEzETMxMAEiJzUWMzI2NRE0IyIGFREjETMXMzY2MzIWFREUBgJ5TDknLy4sg11V3qofDCmGS4yVhwFeE6YNOTwCCpp3if6RAwhiPDWPjv2ye4cAAAACAGgCqAN9Bc0ACwAXACBADgASDAYSBhgZCRVMAw9PAD8zPzMREgE5OREzETMxMAEUFjMyNjU0JiMiBgUUBiMiJjU0NjMyFgFMTlpZT09bWE4CMdK5tNbQvrPUBDtzdnZzdHR0dL7V2Lu+1NgAAAAAAQBGAqgC0wXNABcAIEAOEAMVCgMKGBkNEkwHAE8APzI/MxESATk5ETMRMzEwASImJzUWFjMyNjU0JiMiByc2MzIWFRQGAUpTbDU0akVUY2ZXS2U9apy+yc0CqBohtSAkdHN3cS2kM8zGxc4AAQBoBDUDfQXNABEAI0ASC
wASEwGPCgGwCgF/CgEKBQ5MAD8zL11dcTMREgE5OTEwASM1NCYjIgYVFSM1NDYzMhYVA33hT1tYTuTQvrPUBDUGdHR0dAYGvtTYugABAGgCqAN9BDUADQAjQBIKBA4PA48KAbAKAX8KAQoAB08APzMvXV1xMxESATk5MTABMjY3MwYGIyImJzMUFgH0WE4C4QLRuLHXAuROA1JzcLvS1bhwcwAAAgCRAV4DiwXLABIAHgAwQBcKBAQHEBwHHB8gAwoNCEsHTRMNTBoATwA/Mj8zPz8ROTkREgE5OREzETMRMzEwASImJyMXESMRMxczNjMyFhUUBgMiBgcVFBYzMjU0JgJSRWwyCwvetR4LUZaSo6nUUUwCTVaWSQKmLTdw/sQEXmRz1ry81wJ9Y24XfWzrc3MAAQAxAqYCZgZiABQALkAWDxMTCBEDAwgKAxUWDQkSDBIPSwAFTwA/Mz8zMxEzzRESARc5ETMRMxEzMTABMjcVBiMiJjURIzU3NzMVMxUjERQB2z9MV3WEemt7QI3l5QNQGJ8jgIIBd1hHpKad/olYAAEAiQKmA3kFvAATAClAFAsIEwEBEAgDFBUCBREJSwBODQVPAD8zPz8zEjkREgEXOREzETMxMAEnIwYGIyImNREzERQzMjY1ETMRAs8fCiiHTY2U3YNfUt8CtGU8N5KLAfn+PKh4iAFs/PgAAAEAYALTA54FoAATAC5AFAsQDA0NBAgAEAQABBQVAQAMCAgJAC8zEjkvMxESATk5ETMRMxEzETMRMzEwEzUhMjU0JiMhNSEVBxUWFhUUBiNgAduxe5L+gQMvaD84mJMC09N9WU/Voh0MJYBLhY0AAAABAIsCpgVgBbwAIQA4QBwcGQIhCgwMByEZBCIjDBMWCAAaSwtOBB4eEBZPAD8zMxEzPz8zMxI5ORESARc5ETMRMxEzMTABMxEUMzI2NREzESMnIwYGIyInIwYGIyImNREzERQzMjY1Aofde1VN36oeDSV/R7s9FR9/UYyN3XtZSwW8/jyod4kBbPz4ZTw3czQ/iJUB+f48qHdwAAABABICtANkBbwACwAYQAoBCgwNBQkBSwBOAD8/MzkREgE5OTEwAQEzExYXMzY3EzMBAUr+yOqcHAQECBuc6f7JArQDCP5GWEhLVQG6/PgAAAEADgKoAt8FzQAhADFAGQALBh8LFhkQBiIjABYTCw4cTAMIExMIDk8APzMzEjk5PxI5Ejk5ERIBFzkRMzEwARYWMzI3FQYjIiYnBgYjIic1FjMyNjcmJjU0NjMyFhUUBgICIzYiPCYqS1FnOTpuUkMuLTIrMCZTPZp/gZk6A5wnGxCsFjtERDsWrBAXK1iAUXaSkXdJgQAAAgCRAV4Dwwb2ABMAJwBGQCQXDw8QBgcHIAMlCRwcJSAQBCgpBiAgiiEBISEMEE0UAEcaDE8APzM/Mj8ROS9dMxI5ERIBFzkRMxEzETMRMxEzETMxMAEyFhUUBgcVBBUUBiMiJxEjETQ2FyIVERYWMzI1NCYjIzUzMjY1NCYCF63Db2YBEcCnj17ezbKhJGgqtl9dNidPS1QG9pKBZngRBB7xkqkt/osEUJ6qprb95RUapEtQqFJCREEAAAABABkBXgNmBbwAEwAfQA8QBAEFBBQVBAoBDwVLAU0APz8zEjk5ERIBFzkxMAEjNDY3ATMTFhYXMz4CEzMBBgYB8uwnIf7L53cOMwkEARYXjOf+0x4pAV44x18DAP6qJrUrE1pPAaD9BUjIAAACAGYCpgN7BvYAGwAmADhAGwgUAA4hGg4CFBwcAhoDJygRHgAAFwsFRyQXTwA/Mz8zEjkRMzMREgEXOREzETMRMxEzETMxMAEmNTQ2MzIWFwcmIyIGFRQWFxYWFRQGIyImNRAFNCcGBhUUFjMyNgGFrrCcW6BVWJpmPDFlZoZ8zMGy1gI0lFdlXEpQWgUxaIZlcicnlkYqHCVJMUGqb7S/rJMBAdyFTBd9WUlVXwAAAAIAaAFeBIEFzwAJACMAOEAcFQ4HIyMYCh8AAAoOAyQlEUwKTQMcTAcYGCILTwA/MzMRMz8zPz8REgEXOREzETMzETMRMzEwATQmIyIGFRE2NgERJiY1NDY3FwYGFRQWFxE0NjMyFhUUBgcRA6pGPyouYXz+VMTSUVWiPzhkYZ6FpLzpywROcXA3RP6YCI79fAFOFMmsbshkZVGTVWF0EAFogJXLrq/iE/6yAAABAAABXgOgBccAHwA4QBwWBxcGDg4XHQMgIQcXFAQUBBYFSxZNGwBMCxBNAD8zPzI/PxI5ORESOTkREgEXOREzETMyMTATMhYXFxMzARMWFjMyNxUGIyImJycDIwEnJiYjIgc1NtFgZSg2zd/+tI0WNikhKUFNYnAjTOXsAW1hFDgiMCRRBcdSdZoBVv3w/rYvIwmkF2hl4f5SAmf1NiQMqhUAAAIAh/9gAXsDmgAHAAsALEAXBAgIAAkJDA0GXwIBbwJ/AgICgApSCVAAPz8azF1xMhESATkRMzMRMzEwEzQzMhUUIyITIxEzh3t5eXvo3t4DMWlpZvyVAwgAAAAAAQCR/2ACpAJ3ABAAIUAPAg0KDRESDQoLUgpQBQBTAD8yPz8SORESATk5ETMxMAEyFwcmIyIGFREjETMXMzY2AlgwHBEgIWp53qghCi9/AncEyQhmYP50AwiBR0kAAAD//wCJ/1MDeQJpAQcF4AAA/K0AB7IAE1AAPzUA//8AEv9hA2QCaQEHBeMAAPytAAeyAAtQAD81AP//AJH+CwPDA6MBBwXlAAD8rQAJswEADFEAPzU1AAAA//8AGf4TA2YCcQEHBeYAAPy1AAeyAAVSAD81AAACAIv+FANeAncAEAAcADJAGgYKABoKGh0eBg4wCgEACtAKAgoDEQ5TFwNRAD8zPzMQxF1xEjkREgE5OREzETMxMCUUBiMiJyMWFRUjETQ2MzIWJSIGFRUWMzI2NTQmA16olWtYDAzTv66ixP6WTEo3X05FRee72DV0RL0C1bzS2C5mc7s7bnt7awAA//8AaP4VBIEChgEHBegAAPy3AAmzAQAcUwA/NTUAAAD//wAA/hUDoAJ+AQcF6QAA/LcAB7IABVIAPzUAAAIAmv/sB2gEcwAkACsAcEBAIhoKBxsQGikpEAcDLC0CEwAWDxsBDQUoG2ZZpSgBaSgBDCgcKAIQAygoABYRCA8WJV9ZFhAAHmBZBA1dWQAEFgA/MysrABg/KwAYPzMREjkvX15dXV0rAF9eXRESOTkREgEXOREzETMRMxEzMTAFICcGIyAAEREhERQWMzI2NREhFTY2MzIAFRUhFhYzMjY3FQYGAyIGByEmJgWg/vKOhuT/AP8AATFpcmJlATErgUfWAQH9MAKVf2W0YlC2rl9xBwGsAnIUvLwBAQERAmD9lpKBgZICaj0mLP7t6ZSAlCou7CgnA659b3F7AAL/g//sBLQGFAAnADIAcEAkFiIxAhAIMRAxMzQTIQAZAfAZAQ8ZHxlvGX8ZBAkDGRUfGQMluP/AQBoPFUglJRsNAwsFEBUbAAUo
XVkFDwsuXVkLFgA/KwAYPysAGD8/ERI5ORI5LysXMy9fXl1dcTMzERIBOTkRMxEzETMyMTABFAczNjMyEhEQAiMiJyMHIxEmIyIHIzY2Mxc1IRUWMzI3MwYGIyInEyIDFRQWMzI2NRAB0QwMa9LQ1ufHxXAVM+kcE0sNlgKHcSMBMRMgShKWBIdxGRDb1QZrdF5vBJZCnKb+5f7u/uv+0I97BQgGbo+kBEWnB2+PpAb+wv7VDLScraUBNQAAAAIAXP/sBY0GFAAnADQAfkAsBjYhMjITAQsmGwsZKysbEwM1NgUeACQB8CQBDyQfJG8kfyQECQMkAyEkAwm4/8BAGg8VSAkJJw4YEBYMFScAFi9dWRYPEChdWRAWAD8rABg/KwAYPz8REjk5EjkvKxczL19eXV1xMzMREgEXOREzMxEzETMRMxEzETMxMAEVFjMyNzMGBiMnESMnIwYjIgIREBIzMhczJjU1JiMiByM2NjMyFzUDMjY3NTQmIyIGFRQWBHEZFksNlQSGcCLqOw1o1cXh6sTWbAoXFR5KEpUCiXEWEtJ1bQVxe2ZxcgYUpwdvkqEE+1qRpQEyAQ8BCQEopIBgbgZujqUGR/rLiKMhqZKim6WlAAAB/7QAAAN1Bh8AKwBvQD4jBy0VAA0bDioCDQ0dGQ4OLC0EFQ8KAf8KAQAKEApgCnAKBAkDCgYRCgMYGCoOFSEmXVkhARoBKgFgWR0qDwA/MysRADMYPysAGD8SOS8XMy9fXl1dcTMzERIBOREzMzMRMzMRMxEzMhEzMzEwASERFjMyNzMGBiMiJxEhESYjIgYHIzY2Mxc1IzU3NTQ2MzIXByYjIgYVFSEDCv74GhlKEpYEh3EZEP7PHBMlLQaWAodxI6iovM+ee05cTkE6AQgDef7NB2+PpAb+fwHhBjo0j6QE0ZNSUr+wL+AdTTxGAAAD/7wAAAgzBHMAKgA0AD0AlkBTJT8OFBE1CQkKKwMDNwQiKSktKioEChEEPj8IBQIDAAs1NysDLREkJAs/EZ8RAhEiLQ4oLREOBAAAEAACCwMAABoVChgSDyoEChUxOxg7XVkeGBAAPzMrEQAzGD8zMz8REjk5OS9fXl0XMy8RMy9dMzMvERIXORESFzkREgEXOREzMxEzETMzETMRMxEzETMyETMxMAEmJxEhESYmBxEhEQYHIzQ2NxEzFzM2NjMyFzM2NjMyFhUVNjczBgYHESEBFhc1NCYjIgYVJRYFNTQmIyIGBhDEw/7Pp90D/s9DC5Z5a+kpES2qbvtZGy2vbr7DShKVBHV4/s7+eeifUVd5Zv1QcQEOUVdkYwFMDh3+iQGoHCYD/hkB8BplfKQTAbqPTVekTlbD18skaIeeG/6mAi8jCot5eaehYwwuLXl5dQAAAv+8AAAFmgRzAB4AJwCAQDIDKREXFCYMDA0ABwcfCAgNFAMoKQkLBg4fJgACDg8UXxRvFAMJFAARFAMABhAGAg4DBrj/wEATCQxIBgYYDRsVDwgNFRsjXVkbEAA/KwAYPzM/ERI5OS8rX15dFzMvXl0zMxI5ORESOTkREgEXOREzMxEzETMRMxEzMhEzMTABNjczBgYHESERJycRIREGByM0NjcRMxczNjYzMhYVBTU0JiMiBgcWBKhRC5YFdXj+z93J/s9ECpZ8aOkpETOzcsPK/s9WXmdzEkYCHRpjipUT/pgBfTEp/ikB8B5hfqYTAbaPUVPTx6RYeXlugA4AAAL/g/4UBLQEcwAoADQAdkBDGAsyHwMDHAYtERESJTIyEhwDNTYKFQAbAfAbAQ8bHxtvG38bBAkDGxsYCAMODhICIAAiHQ8SGyIpXVkiEAAwXVkAFgA/KwAYPysAGD8/ERI5ORE5LxczL19eXV1xMzMREgEXOREzETMRMzMRMxEzETMyMTAFIicjFhUVFjMyNzMGBiMiJxUhNSYjIgcjNjYzFxEzFzM2MzISERQCBgMiBgcVFBYzMhE0JgMGxXAQEBMgShKWBIdxGRD+zxwTTAyWAodxI/grDmvSxuBpwt1xaAJrdM1lFI+MFpcHbY+kBmfFBmyPpAQEvJGm/s7+8LP++IoDk4ugIbScAVKlpQAAAf+DAAADdwRzACUAakBAGwIOCRQUFQ4iIhUfAyYnDRgAHgHwHgEPHm8efx4DCR4LGx4DABEQEUARUBFgEQULAxERIxUAAAVkWQAQIA8VFQA/Pz8rERIAOTkYL19eXRczL15dXXEzMxESARc5ETMRMxEzETMyMTABMhcDJiMiBhUVFjMyNzMGBiMiJxEhESYjIgcjNjYzFxEzFzM2NgMQPikXJTWSoxYdShKWBIdxGw7+zxwTTAyWAodxI+ctDzSxBHMJ/uIKlocvBm2Opgf+vAGiBm2OpgUB9LxecwAAAAAB/30AAAMEBHcAIQBdQDgGGxYhIQsAABAiIwMaAAkB8AkBDwlvCX8JAwkJGAYJAwAeEB5AHlAeYB4FCwMeHg4AFQ4TYVkOEAA/KwAYPxI5L19eXRczL15dXXEzMxESATk5ETMzETMzMjEwMxEmIyIHIzY2MzIXNRAhMhcHJiMiFRUWMzI3MwYGIyInEZocFEoOlQKFcxESAX12dyNEPZUWHUoSlgSHcRsOAaIGbY2nBXUBmCH4FaTFBm2Opgf+vAAB//j/7AQKBHMAMQBXQDEuMxYfMAYaJRgwDQ0YGgMyMxYPLR8tAhADLRolDQAtEysWCAMdHSNgWR0QAwpfWQMWAD8rABg/KxESABc5GC9fXl0vERIBFzkRMxEzETMRMzIRMzEwARQGIyImJzUWFjMyNjU0LgMjIgcjNDcmNTQ2MzIXByYmIyIVFBYWFxYXNjczBgcWA6zv7nqsS1XVUVpMIERigTxhDpV/G+fUyr9cVJRMhRpEjHc9QAyVB20WAUystCEg/Cg2MDAfNzUvLWy6SjhZlaNY3CQuSxkiJjszMxdTukQ4AAAAAf+k/+wDNwVMACoAfkBIDiIoFAgZHSgoEggbAwgDKyxpCwEhCwARAfARAQ8RHxF/EQMRZh8BHxEOAwAlECUCCQMlJQUZExwWGBlAGRxgWRkPBQBdWQUWAD8rABg/KwAaGBDNMxEzERI5L19eXRczXS9dXXEzM10REgE5OREzETMzETMzETMRMzIxMCUyNxUGIyImNTUmIyIHIzY2Mxc1IzU3NzMVIRUhERYzMjczBgYjIicVFBYCd1Bwcqa3pxwUSg6VAoVzI5KoWMMBOf7HFR5KEpYEiW8bDknfI+MzubmWBm2NpwW9gWbs7uX+4wZtj6UHOEE+AAABAAQAAAPVBF4AIAB5QEYPHh8OEB4DFA4TABkADgMBCQYhImkFARgFYAwBAwwTDAIJDCYWARYJDAMPHF8cAg0DHBwRAh8BERBeWREPExARAR9eWQEVAD8rERIAORg/KxESADkSORgvX15dFzNdL15dXTMzXRESARc5ETMRMxEzMxEzETMxMCEhNRMmIyIGByM2NjMyFzchNSEVAxYzMjczBgYjIicHIQOq/I38GxolOQaWAodxWVeP/hkDQvYeIU0
RlgSLd0xchwIKtAFYBjc1j6Qnw+nG/rAGbJGiJ7kAAAIAoP4UBLQEcwALACYATkAnEh4EGCQkJh4JJgknKCMZIRsbAF5ZGxshDw8UX1kPECYhIQddWSEbAD8rABgQxD8rERIAORgvKxESADk5ERIBOTkRMxEzETMzETMxMAEiBgcVFBYzMhE0JiU0JCEyFxUmIyIVFQczNjMyEhEQAiMiJyMHIQKieGkFb3vXa/2OARkBFvWtzejrBARs0cXh5MrOdQkY/v4BrIugJbKeAVKopt3x+UL0Vv0Tjab+0P7u/u/+y6SPAAAAAQCWArQDnAW2AAsAREAmCAQEBQAJAQUBDA0DGggqCAKtCAGcCAFZCAFICAEICAUKBksBBU4APzM/MxI5L11dXV1xMxESATk5ETMzETMRMzEwASMRIREjETMRIREzA5zi/r3h4QFD4gK0ATP+zQMC/uoBFgACABD+FARMBF4AFQAhAFxALwwRCRYQAA0WBhEKABwcCgYDIiMKCxERCQ4JH2JZCQkDEA0ODg1eWQ4PAxlfWQMbAD8rABg/KxESADkSORgvKxESADkRMzMREgEXOREzETMRMzMRMxEzETMxMCEUACMiJDU0NjcnNSUhNSEVARceAgUUFjMyNjU0JiMiBgRM/t3+9/7c59iyAUn98AO0/liDkI9I/Px3cG92cHVrfOX++f/jvPMkcGXX6c3+5lJambp+f4eIfnaGhgAAAAABAC//ZgghBhQANACXQFErASMREQocFxcaISUlGgwYLigJCSYKAAEBChgaBDU2JgslLAwFDioPFwEiAxcKKicALwoxMQVdWTEQICFAGyQhJGBZHiEPAQoVGBMTDl1ZExYAPysRADMYPzM/MysRADMaGBDNPysREgA5GD8zEMRfXl0REhc5ERIBFzkRMxEzMxEzMxEzETMRMxEzETMRMxEzETMxMCEhETQmIyIGFREhEQEWMzI3FQYjIicHIzcmNREjNTc3MxUhFSERAREhFTczAQYHMzYzMhYVCCH+z1hdgHH+z/43DBtQcHKmUTZ128BHkqhYwwE5/scCJwExR9z+3QYJEWbdxswCjXl5rsP98gMj/cAEI+MzEJbyWK4CG4Fm7O7l/gYCuAHdXFz+knBnpNPHAAEAAAAAAnEEXgALAEpAKwINBwAEBAkFBQwNAwcIB4ZZAKcIAQgiGRpIfAiMCAJJCNkIAggIBQoPBRUAPz8SOS9dXStdMysRADMREgE5ETMzETMyETMxMAEzFSMRIREjNTMRIQHRoKD+z6CgATECnsf+KQHXxwHAAAEAAP/sAxcEXgAVAFdAMg0UEgAADwsLBhYXFQ0ODXFZEmcOAQ4iGRpIPA5MDgIDDw6fDgINBQ4OFhAPCANdWQgWAD8rABg/EjkvX15dX10rXTMrEQAzERIBOTkRMzMRMzMyMTABFBYzMjcVBiMiJjU1IzUzESERMxUjAdFJPFFwbZq+sqCgATHJyQFeQT4j4zO5uXnHAcD+QMcAAAMAAP4UBRAEcwAZAB8AJgB+QEkJDgMDCwckHBYUFxccBwMnKAIOABEXHQkKCWVZFCN3CgEKIhkaSEwKXAoCGQqpCgIDCgonDA8HGw8gAQwGESBdWREQABpdWQAWAD8rABg/KwBfXl0YPz8SOS9fXV0rXTMzKxEAMzMREjk5ERIBFzkRMzMRMxEzMxEzMjEwBSInIxYVESERIzUzETMXMzYzMhYXMxUjBgIlMjchFhYTIgYHISYmAwbFcBAQ/s+goPgrDmvSsdoVYmAT3/7yrRz+XAtpY2hjDAGiEGEUj4wW/jsDw8cBwJGm+N3H6P799/R/dQKccHFwcQAAAAIAAP/sBUQEXgASABgAV0AuBwwWFgkGEAANFRIAABUGAxkaABYHCAeGWRAMDwhPCAINAwgIAw4KDwMTXVkDFgA/KwAYPzMSOS9fXl0zMysRADMzERIBFzkRMxEzETMRMzMRMzIxMAEGBCMiJCcjNTMRIREhESERMxUFMjUhFBYEpAX++Pf6/v0DoKABMQGiATGg/WjH/l5rAdfx+vP4xwHA/kABwP5Ax/b2fHoAAAAAAgAA/+wFLwReAB0AJABhQDMJGQEOIggRDRoWAAEBIRQWDQgGJSYBIgkKCWVZHBMKCgQWEQ8aDg8OYFkXDw8EHl5ZBBYAPysAGD8zKxEAMxI5ORE5GC8zMysRADMzERIBFzkRMxEzETMRMzMRMzIxMAEjBgAhIAA1NSM1MzY3IzUhFQYHISYnNSEVIxYXMwEyNjchFhYFL1wE/s3+/P73/s1cfzJ49gIbkTICFyyTAhnwdDR7/Wl9lwz9vwyaAdfn/vwBCeACx4RX5eFVioRb4eVRiv47g3uAfgAAAAIAoP4pBLQGFAAgAC0ATEApAhUVEB4IGysrCBADLi8WDR0DCxgABV9ZABEAEBUYIV1ZGBALKF1ZCxYAPysAGD8rABg/Py8rERIAFzkREgEXOREzETMRMxEzMTABIic1FjMyNjU1ByMiJyMHIxEhERQHMzYzMhIREAcVFAYDIgYHFRQWMzI2NTQmAxRcRzIwMzMZGsVwFTPpATEMDGvSxuB9lPdxaAJrdF5vcP4pG9UTM0RxAo97BhT+lkWYpv7L/vP+4536saMFVougIbScraWlpQAAAgBc/ikE1wYUACAALQBVQC0CJCsRHBkXJB4ICCQRAy4vCxYOFAAFX1kAGgAUKF1ZFBAKHF9ZChUOIV1ZDhYAPysAGD8rABg/KwAYPy8rERIAOTkREgEXOREzETMzMxEzETMxMAEiJzUWMzI2NTUjJyMGIyICERASMzIXMyY1ESERMxEUBgEyNjc1NCYjIgYVFBYDtF1HNC8zM1I7DWjVxeHlydNvChcBMmaU/ip1bQVvfWZxcv4pG9UTM0SDkaUBMgEPARMBM6R9YgFm+sv+nrGjAraIoyG0nK2lpaUAAAABACn+KQN1Bh8AIwBXQC0UJR0hAgwKGx8fDgohCAoIJCUABV9ZABIXXVkSAQseDhsbHmBZGw8KH19ZChUAPysAGD8rEQAzETMYPysAGC8rERIBOTkRMxEzMxEzETMzETMRMzEwASInNRYzMjY1NSMRIzU3NTQ2MzIXByYjIgYVFSEVIREzERQGAUZdRzIwMzOZqKi8z557TlxOQToBCP74ZpT+KRvVEzNEgwN5k1JSv7Av4B1NPEbl/Wb+nrGjAAACAFz+FAaJBHMAKgA2AGlAOAILEDQdCxcmIy4oCAguFx0ENzgYIhogJgpiWSYmGiAABV9ZACQPIDJdWSAQGiteWRoWDhNfWQ4bAD8rABg/KwAYPysAGD8vKxESADkYLysREgA5ORESARc5ETMRMzMRMxEzMxEzMTABIic1FjMyNjU1IRUUBCEiJzUWMzI1NTcjBiMiAhEQEjMyFzM3IREhERQGATI2NzU0JiMiERQWBWZcRzIwMzP+5v7n/ur1rcvp6wUFa9LJ3eXJznYIGQECAhiU/Hp4ZwZve9dr/ikb1RMzRK4t8flC9Fb+Eo2lATYBCwETATOkj/ye/oGxow
Kyi6Als53+rqimAAAAAQCg/ikE2wYUABsATUApFhoCFxIOFwsaCAgLDgMcHQsXEwMMDA4VAAVfWQAPABUPDhUKGF9ZChUAPysAGD8/Py8rERIAOREXMxESARc5ETMRMxEzETMRMzEwASInNRYzMjY1NSMBBxEhESERBzM3ASEBATMREAOqXUc0LjM0Of6+g/7PATEQBIUBOQFY/kQBMX3+KRvVEzNEgwHFaf6kBhT9Sv6qAVT+G/5m/p7+rAABAHH+KQI3BhQAEQAsQBYHAA8CDQ8NEhMFCl9ZBRAADwBfWQ8VAD8rABg/LysREgE5OREzETMzMTAlMxEUBiMiJzUWMzI2NTUjESEB0WaUj1xHMjAzM5kBMd/+nrGjG9UTM0SDBhQAAAEAoP4pB6gEcwAxAFdALgIKHxwSEy0KLwgIChMcBDIzJSAcIwAFX1kAHQ8THBUOFyMXXVkpIxAKLV9ZChUAPysAGD8zKxEAMxg/Mz8vKxESADk5ERIBFzkRMxEzETMRMxEzMTABIic1FjMyNjU1IxE0JiMiBhURIRE0JiMiBhURIREzFzM2NjMyFzM2NjMyFhURMxEUBgaFXUc0LzMzmlFXcG/+z1FXdWr+z+kpES2qbvtZGy2vbr7DZpT+KRvVEzNEgwKNeXmgrv3PAo15eazF/fIEXo9NV6ROVsPX/gb+nrGjAAEAoP4pBQ4EcwAiAEhAJgIKFhMeCiAICAoTAyMkFgoaAAVfWQAUDxMVGg5dWRoQCh5fWQoVAD8rABg/KwAYPz8vKxESADkREgEXOREzETMRMxEzMTABIic1FjMyNjU1IxE0JiMiBhURIREzFzM2NjMyFhURMxEUBgPsXUcyMDMzmVZegHL+z+kpETOzcsPKZpT+KRvVEzNEgwKNeXmrxv3yBF6PUVPTx/4G/p6xowAAAgCg/hQEtARzACAALABOQCoCFQ4OEh4IGyoqCBIDLS4WDR0DCxgABV9ZABMPGCFdWRgQCyhdWQsWEhsAPz8rABg/KwAYPy8rERIAFzkREgEXOREzETMRMxEzMzEwASInNRYzMjY1NQYjIicjFhURIREzFzM2MzISERAHFRQGAyIGBxUUFjMyETQmAxRcRzIwMzMYG8VwEBD+z/grDmvSxuB9lPdxaAJrdM1l/ikb1RMzRHMEj4wW/jsGSpGm/s7+8P7nnf6xowVWi6AhtJwBUqWlAAABAHH+KQN3BHMAHgA/QCECDQocCBMICgMfIA0KEQAFX1kAERZkWREQCw8KGl9ZChUAPysAGD8/KwAYLysREgA5ERIBFzkRMxEzMzEwASInNRYzMjY1NSMRMxczNjYzMhcDJiMiBhURMxEUBgEUXEcyMDMzmectDzSxaD4pFyU1kqNmlP4pG9UTM0SDBF68XnMJ/uIKlof+pv6esaMAAAAAAQBc/ikDrARzADMAS0ApITECJw4nHDEUFAgcAzQ1HCcULjAJBgsfAAVfWQAfJWBZHxALEl9ZCxYAPysAGD8rABgvKxESABc5ERIBFzkRMxEzMxEzETMxMAEiJzUWMzI2NTUGIyImJzUWFjMyNTQmJicuAjU0NjMyFwcmJiMiFRQWFx4CFRQHFRQGAjVdRzQvMzNAS3qsS1XVUaYsbFqBeTfn1Mq/XFSSTIdXk4N6OlSU/ikb1RMzRHkKISD8KDZgJC05JjZcd1eVo1jcJC5JKTw7NVx4U45Y6bGjAAH/w/4UA+kGIwAnAExAJxAdCAIjCwsWJQgWCCgpIwpiWSMjDhoABV9ZABofXVkaAQ4TXVkOGwA/KwAYPysAGC8rERIAORgvKxESATk5ETMRMxEzMxEzMjEwASInNRYzMjY1NSEVFAYjIic1FjMyNjURNDYzMhcVJiMiBhURIREUBgLHXUcyMDM0/uWxrXI+PTMzOrOvYEw5NjE9AhiU/ikb1RMzRK6ktb4f6hVCOQU7u6of6RQ9PvxI/oGxowAAAAEAAP4pBI0EXgAYAD1AHwkaABAKBQwWFgUEAxkaBAAYDhNfWQ4IAA8YCl9ZGBUAPysAGD8zLysREgA5ERIBFzkRMxEzMzIRMzEwESETFhczNjcTIQEzERAhIic1FjMyNjU1IQE/2CQJBgUo1wE//qro/t1dRzQuMzT+iQRe/YN5bGCFAn38gf6e/qwb1RMzRIMAAAABAAr+KQSFBF4AGABPQCkTFwILDw0UEQsXCAgLDg0EGRoUCxERCwoPAAVfWQASDw8NFQoVX1kKFQA/KwAYPz8zLysREgA5ORESORESARc5ETMRMzMRMxEzETMxMAEiJzUWMzI2NTUjAwMhAQEhExMhARMzERADVF1HNC4zND7r7P6mAXv+mAFa2dsBWv6U53f+KRvVEzNEgwF//oECOwIj/pwBZP3d/qT+nv6sAAAAAAEAN/4pA6oEXgAVAEZAJQwIEBMCEwgIEQ0KBBYXAAVfWQAQDQ4ODV5ZDg8LEQoKEV5ZChUAPysREgA5GD8rERIAORgvKxESARc5ETMzETMRMzEwASInNRYzMjY1NSE1ASE1IRUBIREUBgKHXUc0LzMz/YsCBv4ZA0L+CAIKlP4pG9UTM0SDtALB6cb9Uf6UsaMAAAACAFb+KQVqBHUAJgAwAHdAQiUyFysNHREwHwMDMA0DMTIHBRkRJ2dZPxFPEQIDDxEBDQYREQUZACJfWQAPFAEMBhkUYFkZEAUdX1kFFQotX1kKFgA/KwAYPysAGD8rAF9eXRgvKxESADkYL19eXV9dKxESADkREgEXOREzETMzETMzETMxMAEiJjU1IycjBgYjIiY1NDY3NzU0IyIHJzYzMhYVETMRFBYzMjcVBgEHBgYVFDMyNjUEvIuNPjsITaODobn5+8KuhrVlwevh8GczMzAyR/3pdoWClGp//imjnJiYYUu4qrKpCQYxqlHOZcTI/fb+nkI1E9UbA90EBFhagXplAAIAXP4pBaAEcwAeACoAU0AsHSwpDBUSIhcDAyIMAyssEQcFEwAaX1kAEw8PJl1ZDxAFFV9ZBRUJH15ZCRYAPysAGD8rABg/KwAYPy8rERIAOTkREgEXOREzETMzETMRMzEwASImNTUjJyMGIyICERASMzIXMzchETMRFBYzMjcVBgEyNjU1NCYjIgYVEATyjI1SOw1o1cXh5cnTbwgbAQJmMzMvNEf9FH1qb31mcf4ppJuYkaUBMgEPARMBM6SP/IH+nkI1E9UbArKXmCG0nKiq/rIAAgBc/hQFTgYjACgANQBXQC4SHh43MwMMIhgsCSYmIgMDNjcnCAAGDxRdWQ8BBjBdWQYQACldWQAWIBtdWSAbAD8rABg/KwAYPysAGD8rERIAOTkREgEXOREzMzMRMxEzETMRMzEwBSICERASMzIXMyY1NTQ2MzIXFSYjIgYVERQWMzI3FQYjIBE1NDY3IwYnMjY3NTQmIyIGFRQWAgLF4eXJ028KF7OwYEw5NjE9OTM0PT59/qwKAw1oanVtBW99ZnFyFAEyAQ8BEwEzpH1iELuqH+kUPT76zzlCFeofAWlMN2YrpfOIoyG0nK2lpaUAA
gBc/ikE7ARzAB8AJgBtQD4YKAsDEh0KJCQdAwMnKA8LAQ0FIwtmWaUjAWkjAQwjHCMCEAMjIwAGGhVfWRoPIAEMBgYgX1kGEAAOX1kAFgA/KwAYPysAX15dGC8rERIAORgvX15dXV0rAF9eXRESARc5ETMRMxEzETMxMAUgABEQADMyABUVIRYWMzI2NxEUFjMyNxUGIyImNTUGAyIGByEmJgKa/vL+0AEZ+O0BCP0vBZCCZbRiMzMvNElmi41VYWFuCAGsAnIUASoBEQEZATP+8u6UgpIqLv5WQjUT1RujnIwIA657cXF7AAABAE7+KQTsBHMAMgCLQFExNBUrIQMNDAwlJQkcDysDAw8JAzM0DSIfHyJ7WWsfAVkfAbkfAQQfAXQfAWIf8h8CPB/MHwIECx8BCgYfHwYSAC5fWQASGWBZEhAGJ19ZBhUAPysAGD8rABgvKxESADkYL19eXV9dXV1xXXFxKxESADkREgEXOREzETMRMxEzETMRMxEzETMxMAEiJjU1BiMgJDU0Njc1JjU0NjMyFhcHJiYjIgYVFBYzMxUjIgYVFCEyNjcRFBYzMjcVBgQ9i41UYP72/ueAkNX+6nPpWF53ik1xboWPmqiSkwEMZ9xZMzMvNEn+KaOcjgqlpGuGHAox0Y2YLibdMB8yNkI300FIfS0p/lBCNRPVGwAAAAEATv4pBCMEcwA0AIlAUBoDIiMjCjMOLS0DIBMmCgoTAwM1NiIODw8Oe1lrDwFZDwG5DwEEDwF0DwFiD/IPAjwPzA8CBAsPAQoGDw8qHQAwX1kAHRZgWR0QKgdgWSoWAD8rABg/KwAYLysREgA5GC9fXl1fXV1dcV1xcSsREgA5ERIBFzkRMxEzETMRMzMRMxEzETMxMAEiJjURFhYzMjY1NCYjIzUzMjY1NCYjIgYHJzY2MzIWFRQHFRYWFRQGBiMiJxUUFjMyNxUGAWaLjVbNYJWUnKJ2cK2RanpNw1Bad+CK0fzfiXWE+qlwQDMzMDJH/imjnAHFKC5DPkRB0zg9NjYmIdUtJ6CJvTkKIn1lZp5WCHdCNRPVGwAAAgBY/ikGHQRzACIAKQBsQD4LKx8nFwUDEBAZJhcEKisRJxgnZlkDDxgfGALPGN8YAhkYAQMPGAELBhgYFAANCF9ZDQAcYFkAEBQjX1kUFgA/KwAYPysAGC8rERIAORgvX15dX11dcTMrEQAzERIBFzkRMzMRMzMRMzEwATIAFzMRFBYzMjcVBiMiJjURBgQjIgA1NSEmJiMiBgc1NjYTMjY3IRYWAiH8ASoT+jMzLzRHZ4yNHP7y3uz+9gLRBZCCX7JpVb+gWXUJ/lQCbwRz/vnx/QJCNRPVG6SbAlLZ9QEL8JSCkiYy7Cwk/FJxenB7AAAAAAIAk/4pAwAGFAARABoATEApEBwWCAgSBQoDBQMbHAANX1kAGRkBAw8ZAQoGFBljWRQABg8FCF9ZBRUAPysAGD8/KwBfXl1fXRgvKxESATk5ETMRMzMRMxEzMTABIiY1NSMRIREzERQWMzI3FQYBNDMyFRQGIyICUoyNmQExZjQzLjRH/dqmplNTpv4ppJuYBF78gf6eQjUT1RsHVpWVR08AAAAAAQA//ikDwQRzACIAOUAdEAMhChsDFQoDCiMkAB5fWQASDV1ZEhAYB11ZGBYAPysAGD8rABgvKxESATk5ETMRMxEzETMxMAEiJjURFhYzMjY1NCYjIgcnNjMgABEQACEiJxUUFjMyNxUGAXGMjUaSX3KIjHhki1eW0wEDARb+6v77KiYzMy80R/4ppJsB3C8yqKapo0LsTP7c/uL+6f7SBHNCNRPVGwAAAf/D/ikCrgYjACEAOUAdECMaAwMgFhYJCSIjAB1fWQANEl1ZDQEZBl5ZGRYAPysAGD8rABgvKxESATkRMxEzMhEzETMxMBMiJjURFjMyNjURNDYzMhcVJiMiBhURFAYHFRQWMzI3FQbbi409MzM6s69gTDk2MT2LhTMzMDJH/imjnAGMFUI5A2S7qh/pFD0+/KqfuxJ1QjUT1RsAAQCa/ikF0QReACIASEAmISQQDRkGGwMDBhYNBCMkBwoOAB5fWQAXDg8FGV9ZBRUKE11ZChYAPysAGD8rABg/My8rERIAORESARc5ETMRMxEzETMxMAEiJjU1IycjBgYjIiY1ESERFBYzMjY1ESERMxEUFjMyNxUGBSOMjVIpEDG0c8XIATFWXoByATFmNDMuNEf+KaSbmI9OVdPGAtn9c3l5q8YCDvyB/p5CNRPVGwABAE7+KQQjBF4AJgBpQDcGASIFCRYmEAIQGwkiGyInKAAmBgYmZlkPBh8GAhADBgYNAxgTX1kYBQIDAwJeWQMPDR9gWQ0WAD8rABg/KxESADkYLysREgA5GC9fXl0rERIAORESATk5ETMRMzMRMzMRMxEzMzEwASUhNSEVBRYWFRQGBiMiJxUUFjMyNxUGIyImNREWFjMyNjU0JiMjARsBP/4IA57+rsOyg/qqcEAzMzAyR2eLjVbNYJWUkqGsAn346cb4HKGTa6NWCHdCNRPVG6OcAcUoLkM+QzgAAgCNAqYDXAXFABEAHAAyQBgVDgkJCwMbCxsdHggPBgxLC04SAEwYBk8APzM/Mj8/ETk5ERIBOTkRMxEzETMzMTABMhYVFAYjIicjByMRMxczNjYXIhUVFBYzMjY1NAI5ipmfipJLBhOwqh0KIW8EolJWREsFxde3vtNxYwMCZDQ/psUrd2h2c+YAAAABAF4CpgLHBcUAEwAeQA4MAgcRAgMUFQoFTA4ATwA/Mj8zERIBFzkRMzEwASARNDYzMhcHJiMiFRQzMjcVBgYBxf6Zw7Z+cj5uRKKicmcwaAKmAYvFzzSjLejjQ7QeGwACAF4CaAL4BcUAHgAoAEVAIhgHABMMEQcmJhEPEwQpKgIhBCMRDBYOCh8EBAocFkwjCk8APzM/MxI5LzMQzhI5ORESOTkREgEXOREzETMRMxEzMTABFBc2MzIWFRQGIyInBgcnNjcmNTQ2MzIXByYmIyIGFyIHFjMyNjU0JgE1BFuDZnuWh4xTEyBpHSlIycJ8cD0wUi1eVtNcRilhMT0qBDMnGFRpXGl0NyJTMElMbZfIzDSjFBlv4UxAKCIdJQACAFICpgM7BvoAGwAnAFhAMhccIhEIGQscHBkAAhsRBigpCAUOAxkAFgIWHwAUEBQgFHAUgBQFFBQOBgIDRwZHJQ5PAD8zPz8zERI5L10zORESOTkREjk5ERIBFzkRMxEzETMRMzEwASYnNxYXNxcHFhYVFAYjIiY1NDYzMhc3JicHJwE0JiMiBhUUFjMyNgFxKj9GYlCcQ3Vnasiuqsm1l4sxBjNSnkUBbFNKVUlPT1RJBj8dI3svM2BrR1/6qcXZwaGivkMCdVBjbf4xSlxiY19lcQAAAAABAEgCpgLsBcUAIwBcQDUJGhoiERISIg8DFR8fAyIDJCURIiIEIwH0IwHiIwGsIwEEmSMBDyMfIy8jAyMjGAYMTB0YTwA/Mz8zEjkvXV1fXV1dcTMSORESARc5
ETMRMxEzETMRMxEzMTABMjY1NCYjIgcnNjYzMhYVFAcVFhYVFAYjIic1FjMyNTQjIzUBP2lzSFR7eT1fnk+Qq5peU8yvxWSBic3bUgSPIS8lJTGTIhhvXoMnBhdVRm6CMaw7WlyRAAAAAQBCArQChQbyABQAMUAXDRYAEwICBwMFAxUWA04QC0cEAQcBE0sAPzMzETM/Mz8REgE5OREzMxEzMxEzMTABIxEjESM1NzU0NjMyFwcmIyIVFTMCO7bRcnKAkWhYNU8mVrYFGf2bAmVmNz6AfiGaE142AAAB/6gBYgGwBbYAFAAuQBUCCQ4SEgsHBxUWEQkJDgoKDEsFAE0APzI/OS8zMxEzERIBOREzMxEzMjIxMBMiJzUWMzI1ESM1MxEzETMVIxEUBjFUNS8zZ3Fx0W5ujQFiEaYPdwF3iQE1/suJ/mh7gwAAAgBeAWIDLQXFAAsAJgA2QBsRCR4MJAMDGB4DJygZIxshJUsHIUwAG08UD00APzM/Mz8zPxESOTkREgEXOREzMxEzMzEwATI2NzU0JiMiFRQWBRQGIyInNRYzMjU1NyMGIyImNTQ2MzIXMzczActTRwNLVJRIAbDBwK1yiaKiBARJkIibnYqOUQYRsgNMYWwae23qc3KapastqDuuDGNz0rvA0nFiAAAAAQCNAWgDUgW2ABUALUAVDQoVEgMACgAWFwQHCwBNEwtLDwdPAD8zPzM/ERI5ERIBOTkRMzMzETMxMAERNDcjBgYjIiY1ETMRFDMyNjURMxECgQgIJW1RhovRfVdP0QFoATwZXDs4kYoB9f4+pnaIAWr7sgACAAACtAGuBuUACwATAFxANwcAEAQEDAkFBRQVEl8OAW8Ofw4CDgqAAwcHAA0IAewI/AgCqggBmQgBDwgfCC8IAwgIBQpLBU4APz8SOS9dXV1dcTMzETMaEMxdcTIREgE5ETMzMxEzMzIxMAEzFSMRIxEjNTMRMyc0MzIVFCMiAT9vb9Bvb9DZcXNzcQSBif68AUSJATXHaGhmAAABAI0CpgI/BbYADAAaQAsBCwsGDQ4MSwMITwA/Mz8REgE5OREzMTABERQzMjcVBiMiJjURAV5cO0pQZIN7Bbb98FgYnSOBfwIQAAAAAQBGArQCCgW2AAsAJEAQAQkGCwkLDA0FAAJLBgsJTgA/MzM/MzMREgE5OREzETMxMBMnNSEVBxEXFSE1N754AcR7e/48eAUXO2RkO/4/O2dnOwAAAQBGArQCCgW2ABMAVEAwBQERCg4OAxMRExQVDQEBCg0CAewC/AICqgIBmQIBDwIfAi8CAwICEQkEBksOExFOAD8zMz8zMxI5L11dXV1xMzMRMxESATk5ETMzETMRMzMxMBMjNTM1JzUhFQcVMxUjFRcVITU3vm5ueAHEe29ve/48eAP4iZY7ZGQ7lomiO2dnOwAAAAP/pAFiAcUG5QAPABcAIQBNQCofCwsCFAUQDxoEBRoFIiMWXxIBbxJ/EgISgABLBRwcAg5ACQ9IDk4YCE0APzM/KzMzETM/GsxdcTIREgE5OREzETMzETMzMhEzMTATMxEzFSMGBiMiJjU0NjMzAzQzMhUUIyIDMjU1IyIGFRQWk9FhYwl7b1twZmMmCHFzc3E1PSInJxwFtvz+bG93WkpTWwPJaGhm+75IKyYaFR4AAQCNAWICAAbpAAwAHEAMBg4ACgoNDgtGAwhNAD8zPxESATkRMxEzMTABFBYzMjcVBiMiNREzAV4qKiMrKVjy0QJgKC4PoBf6BI0AAAH/9AFiAWYG6QANABpACwcCDQ0ODwBGCgVNAD8zPxESATkRMzIxMBMzERQGIyInNRYzMjY1ltB4eVgpKyMqKgbp+3OBeRegDy4oAAABAJECtAKkBbYABQAaQAsDAAAFBgcBSwMATgA/Mj8REgE5OREzMTATETMRIRWR0QFCArQDAv2gogAAAAABAJEBYgUhBcUAKgA+QB8fAAwJAAEaJCQBCQMrLBMMCQpLAQlOJwQEFhBMIh1NAD8zPzMzETM/Mz8SOTkREgEXOREzETMRMxEzMTABIxE0IyIGFREjETMXMzY2MzIXMzY2MzIWFREUBiMiJzUWMzI1ETQjIgYVA0LRdVFJ0aAdCiJ2Sa09EiF1ToKGgnhOMSkpVnVTRgK0AcOmdoj+lQMCYjk4cTc6hpX9tniGEaYPdwH2pnZwAAAAAQCLAWIFGwW2ACIAO0AdDgsWEx4bIh8fEwsDIyQABQgcFAxLH00YEBACCE8APzMzETM/PzMzEjk5ERIBFzkRMzMzETMRMzEwAQYjIicjBgYjIiY1ETMRFDMyNjURMxEUMzI2NREzESMRNDcESEeOrzsSInhKg4XRdVNG0XNTR9PTCgMZc3E3OoaVAfX+PqZ2bwGD/j6meYUBavusAUIoTQAAAAAB//gBYgNWBcUAHgAsQBUOFxQAARQBHyAXARVLAU4FG0wRDE0APzM/Mz8/EjkREgE5OREzETMyMTABIxE0JiMiBhURFAYjIic1FjMyNjURMxczNjYzMhYVA1bRPEFXT3t0ViUlKSMooB0KJYBJhYsCtAHDU1N2iP5BfoAVog8sKANYYjw1j4wAAQCRAWID8AXFAB0ALkAWBh8WEwAKEwoeHxYTFEsTTg4aTAMITQA/Mz8zPz8SORESATk5ETMRMxEzMTABFBYzMjcVBiMiNRE0JiMiBhURIxEzFzM2NjMyFhUDVikjKSUlXOo8QVdP0aAdCiWASYWLAl4oLA+iFfgCHVNTdoj+lQMCYjw1j4wAAAEAkQK0A6wFtgANAChAEwMHAQoKDAcDDg8KAwcNCEsHAk4APzM/MxI5ORESARc5ETMRMzEwAREjARcXESMRMwEmNREDrP7+oAcHy/wBYw8Ftvz+AhtYkP7NAwL95LgzATEAAAADAF4CpgNIBcUACwAQABUASEAqEw8PBgAUDgYOFhcPDRMB7BP8EwKpEwGZEwEPEx8TLxMDExMDEQlMDANPAD8zPzMSOS9dXV1dcTMREgE5OREzMxEzETMxMAEUBiMiJjU0NjMyFgEyNyEWEyIHISYDSMivqcrFsqnK/ouGFP7KFoaGFgE2FAQ3v9LXurvT1/5ioKABy5qaAAMAXgF3BEYG+AARABgAHwBCQCESDAAdBwcPFQgDGRkIDAMgIQhNEEccFhYAD0wdFRUGCU4APzMzETM/MzMRMz8/ERIBFzkRMxEzMzMRMzMRMzEwARYWFRQGBxEjESYmNTQ2NxEzARQWFxEGBgU0JicRNjYCtL7U0MLAvdnQxsD+fWZdWmkCQmdYXWIFyRLRp6vPEf7DAT0S0qeyyg4BL/1HX34MAdMMgF5efgz+Lwx+AAAAAAEAXgFiAqYFxQAsADhAHSEAJxwGEAAWFhAcAy0uJxwWAAQDJR9MFANPCQ5NAD8zPzM/MxIXORESARc5ETMRMxEzETMxMAEUBiMiJxUUFjMyNxUGIyI1ERYWMzI1NCYnJiY1NDYzMhcHJiYjIhUUFhcWFgKmpJ40IiElIiIqR8c/lDFzPmp6V5+SiYZAOWU1XDx
kgFcDmnd9CmYoKAyRE+gBNx4kRB8sLjJoWGZxPpUYHzMbKSk0aQAB//gBaAH6BvIAFwAgQA4SGQUACwsYGQgDTRQPRwA/Mz8zERIBOREzMhEzMTABFAYjIic1FjMyNjURNDYzMhcVJiMiBhUBYnt0UyglKSMoenpEMSQoIioCZn6AFaIPLCgDmoFzFaANKSsAAAABADEBYgJIBm0AIAA8QB4WHhQYGA0eCAgCDQ8EISITExEUDhcXFEsbCU8FAE0APzI/Mz8zETMRMzMvERIBFzkRMxEzETMRMzEwASInNRYzMjY1NSMiJjURIzU3NzMVMxUjERQWMzI3ERQGAX9FLCIiIyMTe3Rlcz2F19czKjtKZgFiE5EMIy9af4EBc1hHtbed/o0tKxj+5npwAAACACMCpgPBBbYAFwAdAF5ANRsPHBwMCBMXEBoXAQEaCAoEHh8CBQ0WGwoKEw+ZC6kLAogLAQ8LHwsvCwMLCxENSwBOGAVPAD8zPz8zOS9dXV0zMzMRMzMREjkREgEXOREzETMRMxEzMxEzMzEwAScjBgYjIiY1NSM1MxEzESERMxEzFSMRJTI3IRUUArIcCyWASIaLamrRASPRb2/+iZQS/t0CtGM7NpGKRYkBJ/7ZASf+2Yn+rpq4EqYAAAEAKQKmA3UFtgAcAD1AHgkMFRoSGBQKBgwDAwYUEgQdHhgGFgoVFQcWSwAPTwA/Mz8zMxEzEjk5ERIBFzkRMxEzETMRMzMRMzEwATI2NTQmJzUhFSMWFRQGIyImNTQ3IzUhFQYVFBYBz1tuS0sBc6aJ0re01Y2qAXOWbgNKZmlZfC2bnWK5nbu2nLxlnZtZqWlmAAABAIsCpgNOBbYAEQAgQA4GAw8MAwwSEw0ESwkATwA/Mj8zERIBOTkRMxEzMTABIiY1ETMRFBYzMjY1ETMRFAYB7K+y0UlPREPTtAKmsb0Bov5WZFhYZAGq/l63twAAAAABAIsCpgNqBcUAGwAnQBIVDwkGAA8GDxwdExhMB0sMA08APzM/PzMREgE5OREzETMRMzEwARQGIyImNREzERQWMzI2NTU0JiMiBzU2MzIWFQNqvLa2t9FPV0pMJyMhKzJTcHMEFLa4sb0Bov5WZFhYZLsoLg+iFX56AAAAAf/0ArQDFAW2AAsAGkALAQ0KDAYLAgpOC0sAPz8zEjkRATMRMzEwAQEjAyYnIwYHAyMBAfABJNuTGwQEBRqV2wElBbb8/gG2XUE+YP5KAwIAAAAAAQBSArQCsAW2AAkAMEAWAAcECAEHAwEDCgsCCAEHBAQFSwgBTgA/Mz8zEjkREjkREgE5OREzETMzETMxMAEhNQEhNSEVASECsP2iAWT+sAI+/qYBZgK0fQHmn4n+KQAAAQBSAWIDOwW2ABUANUAaCAwFCwcOAwMHBQMWFwsICAlLBgwMBU4RAE0APzI/MxI5PzMSORESARc5ETMRMxEzMzEwASImNTUhNQEhNSEVASERFBYzMjcVBgLDWGn+UAFk/rACPv6mAWYlIx8kLgFiZ3R3fQHmn4n+Kf72LyMMkRMAAAAAAgBSAjkDVgW2ABcAHwBLQCUDDQkNBh8FDAgUGhoIBQYEICECBhwREQYMCQkKSwcGHw0NAAZOAD8zMxEzEjk/MxI5EjkvMxDEERIBFzkRMxEzETMRMzMRMzEwAQYHJzY3ITUBITUhFQEzNjYzMhYVFAYjNzI1NCMiBgcB0ygJbxAT/vwBZP6wAkT+nFZAeVdNXYiPFXkxJjQdArRgGycuJn0B5p9//h98Y11PbmeiPTQ9NAAAAQAtAWgDAgW2ABgANkAaAg0GGAUBCBQUARgDGRoAFwYGAwtNBQICA0sAPzMSOT8SOS8zMxESARc5ETMRMxEzMzMxMBMBITUhFQEEERQGIyInNRYWMzI2NTQmIyPJARb+awKZ/s8BUNzIrIVEnEFrdIeLUgQOAQmfif7jKP7UnrY4tCIiW1xaWAAAAAADAF4CpgNGBvoACwASABkAVEAzFhAQBgAXDwYPGhsQWhYBDxYB/hYB3RYBvBbMFgKKFgF5FgEPFh8WLxYDFhYDEwlHDANPAD8zPzMSOS9dXV1dXV1xcTMREgE5OREzMxEzETMxMAEQAiMiAhEQEjMyEgEyNjchFhYTIgYHISYmA0a7urS/tr23vv6LUEoE/sQDSVJPRwYBOgdJBNH+5P7xARkBEgEYARH+5v1foqSipAMhnp+fngAAA/5eBPgBogaTAAoAFQAdABdACQ0TExoCCBqAFwAvGs3EMhI5LzMxMAE0MzIWFRQGIyImJTQzMhYVFAYjIiYHIyYnNSEWF/5egTlKSjk7RgI+gz5FSzg8Rw+JgDoBBhAtBX2HQEdEQUHTh0dAREFBr7OAFISpAAAAA/41BPgBzQaTAAoAFQAeABdACQ0TExkCCBmAHgAvGszEMhI5LzMxMAE0MzIWFRQGIyImJTQzMhYVFAYjIiYFNjY3IRUGByP+NYE5Sko5O0YCkoM5Sko5PEf+lw8nCAEGOIOJBX2HQEdEQUHTh0BHREFBlTG8QBR8twAAAAH/hf5OAGb/qgArAB1AEisQACAAYABwAAQAExAYIBgCGAAvXTMvXTIxMBMiNTQ+AjU0LgI1ND4CNTQjIgcnNjMyFRQOAhUUHgIVFA4CFRQXWrIhKCEhKCEjKyMyLC4IQzZoICYgHSQdHSQdWv5OPxAVDgsGCAkKEBAPFxIQCBEVNxs9GR0TDQkGBwsTEhETDAkHDgQAAAAAAf51BLwBjQX6ABUAJUAYABNfCm8KfwoDCoAODwVfBX8FrwXPBQUFAC9dMxrdXcQyMTABIg4CIyImJjU1MxYWMzI2NzYzMxUBf06Fd3A6aXU4tgsrLh5JS4W3EAU1Ji0mO3dbMTs0GB82wwAAAf7RBMUBPwYEAAUAGUAQAgUPAG8AfwCfAK8AzwAGAAAvXTLGMTABITcXBSH+0QEj9Vb+3v60BX+Fop0AAf7BBMUBLwYEAAUAGUAQAwEPBG8EfwSfBK8EzwQGBAAvXTPNMTABISU3FyEBL/60/t5W9QEjBMWdooUAAf7RBLQBPwX0AAUAFUALAwUFXwCfAK8AAwAAL10yETMxMAEhBQcnIf7RAUwBIlb1/t0F9J6ihQAB/sEEtAEvBfQABQAVQAsCAQFfBJ8ErwQDBAAvXTMRMzEwASEHJyUhAS/+3fVWASIBTAU5haKeAAH+OQSsAccF+AAHACtAHgIHDwRfBG8EAwQDBAYDDwAvAF8AfwCvAM8A7wAHAAAvXRcyL10zMzEwAyU3FyUFByeW/s9W6gEdATFW6gSsoqJ9haKifQAAAAAB/jkErAHHBfgABwArQB4FAA8DXwNvAwMDBAMBAw8HLwdfB38HrwfPB+8HBwcAL10XMy9dMzMxMAMHJyUFNxcFh+pWATEBHepW/s8FMX2iooV9oqIAAAAAAf9K/hQAtP++AA8AHLcNCgsAChsFALj/wLMLD0gAAC8rMj8QxBE5MTAXMhcHJiMiBhUVIxEzFzY2aCclFB0lPjSikQgXOkIIlwxMVHcBoE
UjLAAAAf9UBLgApAZSAAwAH0AWHwUvBT8FAwUPAC8AXwB/AK8AzwAGAAAvXcRdMTATJic1NjczFQYHFhcVjdpfeMEXLZB1SAS4bR2LHmdpHUc6LGcAAAAAAv6H/hQBj/+uAA8AHQAjQBQLCAQXDggQFyAXMBcDF08dXx0CHQAvXcZdxDIQxBE5MTAFBgYHIyYmJzUzFhc3NjczJRYXFQYGByM1NjcmJzUBjyBOHYsVUh5oKj0XKiNo/Q/VZC+rXxcnlm5PhT3IW1TaMhg6qzxxOBtqH4sMSDJpGko2MWYA//8AuAAABPQHZgImACUAAAEHAU8BYAFSABW0AygFJgO4/8O0ISUPCyUBKzUAKzUA//8AoP/sBLQGFAImAEUAAAEHAU8B1QAAAA65AAL/zLQgJA0DJQErNf//ALj+UgT0BbYCJgAlAAABBwJkBRsAAAAOuQAD/8S0ISUPCyUBKzX//wCg/lIEtAYUAiYARQAAAQcCZAUnAAAADrkAAv/8tCAkDAMlASs1//8AuP6hBPQFtgImACUAAAEHAU0AOfnIAB5ADAMAJFAkcCS/JAQkA7j/0bQkIw8LJQErNQARXTX//wCg/qEEtAYUAiYARQAAAQcBTQAb+cgAHkAMAgAjUCNwI78jBCMCuP/ftCMiDAMlASs1ABFdNf//AHf+FATRB3MCJgAmAAAAJwB6Ah0AAAEHAHYBCgFSABtAEgIyBSYBUx0XDQglAtIqLg0TJSs1KzUAKzUAAAD//wBc/hQD4wYhAiYARgAAACcAegGDAAABBgB2VgAAFEAOATYcFgITJQKmKS0CByUrNSs1//8AuAAABXUHZgImACcAAAEHAU8BjQFSABW0AhcFJgK4/7C0EBQFACUBKzUAKzUA//8AXP/sBHEGFAImAEcAAAEHAU8AyQAAAAu2AjUgJAMLJQErNQAAAP//ALj+UgV1BbYCJgAnAAABBwJkBUgAAAAOuQAC/7G0EBQFACUBKzX//wBc/lIEcQYUAiYARwAAAQcCZAUMAAAAC7YCJCAkAw4lASs1AAAA//8AuP6hBXUFtgImACcAAAEHAU0AMfnIAB5ADAIAEFAQcBC/EAQQArj/ibQTEgUAJQErNQARXTX//wBc/qEEcQYUAiYARwAAAQcBTQAd+cgAG0ASAgAjUCNwI78jBCMCJCMiAw4lASs1ABFdNQAAAP//ALj+OwV1BbYCJgAnAAABBwI5AJ4AAAAOuQAC/5K0GBUFACUBKzX//wBc/jsEcQYUAiYARwAAAQYCOVQAAA65AAL/+LQoJQMOJQErNQAA//8AuP5nBXUFtgImACcAAAEHAUsATvmOADG0AkAYARi4/8BAFAkLSAAYQBhQGHAYkBigGPAYBxgCuP+ktBYdBQAlASs1ABFdK3E1AP//AFz+ZwRxBhQCJgBHAAABBwFLACX5jgAutAJAKAEouP/AQBoJC0gAKEAoUChwKJAooCjwKAcoAismLQMOJQErNQARXStxNf//ALgAAAQCCF4CJgAoAAABBwlJAmIBUgAdQBICAQ8ZHxkCGQUmAgEGGRgCCyUBKzU1ACtxNTUA//8AXP/sBGIHDAImAEgAAAEHCUkCZgAAAA23AwIIKSgKESUBKzU1AP//ALgAAAQCCF4CJgAoAAABBwlKAmIBUgAdQBICAQ8ZHxkCGQUmAgEGGRgCCyUBKzU1ACtxNTUA//8AXP/sBGIHDAImAEgAAAEHCUoCZgAAAA23AwIIKSgKESUBKzU1AP//AK3+ZwQSBbYCJgAoAAABBwFL//P5jgAutAFAGAEYuP/AQBoJC0gAGEAYUBhwGJAYoBjwGAcYAQISGQILJQErNQARXStxNf//AFz+ZwRiBHMCJgBIAAABBwFLAA75jgAutAJAHAEcuP/AQBoJC0gAHEAcUBxwHJAcoBzwHAccAhsiKQoRJQErNQARXStxNf//ALj+WQQCBbYCJgAoAAABBwFS/+r5ggAwsQEbuP/Asw4QSBu4/8BADgkLSAAbUBugG/AbBBsBuP/7tBAcAgslASs1ABFdKys1AAD//wBc/lkEYgRzAiYASAAAAQcBUv/t+YIAMLECHLj/wLMOEEgcuP/AQA4JC0gAHFAcoBzwHAQcArj//LQgLAoRJQErNQARXSsrNQAA//8AuP4UBAIHfQImACgAAAAnAHoBlgAAAQcBTv/qAVIAILQCIgUmAbj/97USDAEAJQK4//m0JR8CAyUrNSs1ACs1AAD//wBc/hQEYgYrAiYASAAAACYBTgAAAQcAegGwAAAAFEAOAw8wKgoQJQINIhwKECUrNSs1//8AuAAAA/4HZgImACkAAAEHAU8BJwFSABNACwERBSYBBQoOAgQlASs1ACs1AAAA//8AKQAAA3UHjwImAEkAAAEHAU8A+gF7ABNACwEQFhoIDSUBHQImACs1ASs1AAAA//8Ad//sBScG/gImACoAAAEHAU0ArgFSABNACwEeBSYBTR4dCAIlASs1ACs1AAAA//8AXP4UBHEFrAImAEoAAAEGAU37AAALtgICKikeJiUBKzUA//8AuAAABWYHZgImACsAAAEHAU8B1QFSABW0ARMFJgG4//+0DBAGCyUBKzUAKzUA//8AoAAABKgHZgImAEsAAAEHAU8BfQFSABNACwEdAiYBEhYaChUlASs1ACs1AAAA//8AuP5SBWYFtgImACsAAAEHAmQFjwAAAA65AAH//7QMEAYLJQErNf//AKD+UgSoBhQCJgBLAAABBwJkBSUAAAALtgEAFhoKFSUBKzUAAAD//wC4AAAFZgdWAiYAKwAAAQcAagCiAVIAF0ANAgEhBSYCAQEMHgYLJQErNTUAKzU1AAAA//8AoAAABKgHWAImAEsAAAEHAGoAUgFUABdADQIBKwImAgEcFigKFSUBKzU1ACs1NQAAAP//AI3+FAVmBbYCJgArAAABBwB6ALIAAAALtgEZGhsFBCUBKzUAAAD//wBx/hQEqAYUAiYASwAAAQcAegCWAAAAC7YBGCQlCQglASs1AAAA//8AuP5HBWYFtgImACsAAAEHAU4AovluACKxARm4/8BAEgkLSAAZUBlwGQMZAQASDAYLJQErNQARXSs1//8AoP5HBKgGFAImAEsAAAEHAU4AN/luACKxARm4/8BAEgkLSAAZUBlwGQMZAQAcFgoVJQErNQARXSs1////9f5ZAzIFtgImACwAAAEHAVL/JvmCAC2xARu4/8CzDhBIG7j/wEAUCQtIABtQG6Ab8BsEGwEFEBwGCyUBKzUAEV0rKzUA////n/5ZAtwGFAImAEwAAAEHAVL+0PmCAC2xAhu4/8CzDhBIG7j/wEAUCQtIABtQG6Ab8BsEGwIEER0ABCUBKzUAEV0rKzUA//8APwAAAw4IXgImACwAAAEHCIgBiwFSABtADwMCASoFJgMCARgVEQYLJQErNTU1ACs1NTUAAAD////rAAACugcMAiYA8wAAAQcIiAE3AAAAEEAJAwIBGg0JAgMlASs1NTUAAP//ALgAAAVQB3MCJgAuAAABBwB2ANsBUgATQ
AsBFQUmAUMOEgYAJQErNQArNQAAAP//AKAAAAT2B5wCJgBOAAABBwB2AKYBewATQAsBFwImAUcQFAsFJQErNQArNQAAAP//ALj+UgVQBbYCJgAuAAABBwJkBWoAAAAOuQAB/+W0DREGACUBKzX//wCg/lIE9gYUAiYATgAAAQcCZAUhAAAADrkAAf/VtA8TCwUlASs1//8AuP6hBVAFtgImAC4AAAEHAU0Af/nIAB5ADAEAD1APcA+/DwQPAbj/6bQQDwYAJQErNQARXTX//wCg/qEE9gYUAiYATgAAAQcBTQBG+cgAHkAMAQAPUA9wD78PBA8BuP/ptBIRCwUlASs1ABFdNf//ALj+UgQ/BbYCJgAvAAABBwJkBPgAAAAOuQAB//y0BgoBBSUBKzX//wCR/lIB3QYUAiYATwAAAQcCZAO4AAAADrkAAf//tAQIAgMlASs1//8ACP5SBD8HGQImAC8AAAAnAU3+7QFtAQcCZAT4AAAAHrQBCQUmArj//EAMCg4ABSUBCAkIAQIlKzUrNQArNf///+f+UgKNB1ACJgBPAAAAJwFN/swBpAEHAmQDuAAAAB60AQYCJgK4//9ADAgMAQAlAQIHBgIDJSs1KzUAKzX//wC4/qEEPwW2AiYALwAAAQcBTQAE+cgAHkAMAQAIUAhwCL8IBAgBuP/3tAkIAQUlASs1ABFdNf///+f+oQKNBhQCJgBPAAABBwFN/sz5yAAbQBIBAAZQBnAGvwYEBgEBBwYCAyUBKzUAEV01AAAA//8AuP5nBD8FtgImAC8AAAEHAUsABvmOADG0AUAIAQi4/8BAFAkLSAAIQAhQCHAIkAigCPAIBwgBuP/3tAwTAQUlASs1ABFdK3E1AP///4b+ZwLrBhQCJgBPAAABBwFL/sz5jgAutAFACAEIuP/AQBoJC0gACEAIUAhwCJAIoAjwCAcIAQAKEQIDJQErNQARXStxNf//ALgAAAbTB2YCJgAwAAABBwFPAosBUgAVtAEcBSYBuP//tBUZBw0lASs1ACs1AP//AKAAAAdCBhQCJgBQAAABBwFPArgAAAALtgEAJCgLGyUBKzUAAAD//wC4/lIG0wW2AiYAMAAAAQcCZAY1AAAADrkAAf/vtBUZBw0lASs1//8AoP5SB0IEcwImAFAAAAEHAmQGcQAAAA65AAH//7QkKAsbJQErNf//ALgAAAXJB2YCJgAxAAABBwFPAgYBUgAVtAEXBSYBuP//tBAUCA8lASs1ACs1AP//AKAAAASoBhQCJgBRAAABBwFPAWoAAAAOuQAB//+0FRkLFCUBKzX//wC4/lIFyQW2AiYAMQAAAQcCZAXBAAAAC7YBABAUCA8lASs1AAAA//8AoP5SBKgEcwImAFEAAAEHAmQFJQAAAAu2AQAVGQsUJQErNQAAAP//ALj+oQXJBbYCJgAxAAABBwFNANP5yAAbQBIBABBQEHAQvxAEEAEAExIIDyUBKzUAEV01AAAA//8AoP6hBKgEcwImAFEAAAEHAU0AN/nIABtAEgEAFlAWcBa/FgQWAQEYFwsUJQErNQARXTUAAAD//wC4/mcFyQW2AiYAMQAAAQcBSwDT+Y4AMbQBQBgBGLj/wEAUCQtIABhAGFAYcBiQGKAY8BgHGAG4//+0Fh0IDyUBKzUAEV0rcTUA//8AoP5nBKgEcwImAFEAAAEHAUsAN/mOAC60AUAYARi4/8BAGgkLSAAYQBhQGHAYkBigGPAYBxgBABoiCgAlASs1ABFdK3E1//8Ad//sBecIXgImADIAAAEHCUgDLwFSABdADQMCKAUmAwIBKDQGACUBKzU1ACs1NQAAAP//AFz/7ASYBwwCJgBSAAABBwlIAnkAAAANtwMCACw4EwwlASs1NQD//wB3/+wF5wg3AiYAMgAAAQcJRgMvAVIAG0APBAMCNwUmBAMCATdDBgAlASs1NTUAKzU1NQAAAP//AFz/7ASYBuUCJgBSAAABBwlGAnkAAAAQQAkEAwIAO0cTDCUBKzU1NQAA//8Ad//sBecIXgImADIAAAEHCUkDLwFSAB1AEgMCDyMfIwIjBSYDAgEjIgYAJQErNTUAK3E1NQD//wBc/+wEmAcMAiYAUgAAAQcJSQJ5AAAADbcDAgAnJhMMJQErNTUA//8Ad//sBecIXgImADIAAAEHCUoDLwFSAB1AEgMCDyMfIwIjBSYDAgEjIgYAJQErNTUAK3E1NQD//wBc/+wEmAcMAiYAUgAAAQcJSgJ5AAAADbcDAgAnJhMMJQErNTUA//8AuAAABKoHcwImADMAAAEHAHYAewFSABNACwIcBSYCNhUZEAklASs1ACs1AAAA//8AoP4UBLQGIQImAFMAAAEHAHYAoAAAAAu2AmIhJQgQJQErNQAAAP//ALgAAASqB2YCJgAzAAABBwFPAU4BUgAVtAIbBSYCuP/WtBQYEAklASs1ACs1AP//AKD+FAS0BhQCJgBTAAABBwFPAVIAAAAOuQAC/+G0ICQIECUBKzX//wC4AAAFSAdmAiYANQAAAQcBTwFgAVIAFbQCHgUmArj/mbQXGwwUJQErNQArNQD//wCgAAADdwYUAiYAVQAAAQcBTwDRAAAADrkAAf//tBEVCwIlASs1//8AuP5SBUgFtgImADUAAAEHAmQFYAAAAA65AAL/37QXGwwUJQErNf//AJH+UgN3BHMCJgBVAAABBwJkA7gAAAALtgEAERUKCSUBKzUAAAD//wC4/lIFSAb+AiYANQAAACcBTQAnAVIBBwJkBWAAAAAgtAIZBSYDuP/ftRsfCxQlArj/5LQaGQwQJSs1KzUAKzUAAP//AJH+UgN3BawCJgBVAAAAJgFNigABBwJkA7gAAAAZuQAC//+1FRkKCSUBuP/ttBQTCwIlKzUrNQAAAP//ALj+oQVIBbYCJgA1AAABBwFNAHX5yAAeQAwCABlQGXAZvxkEGQK4/+O0GhkMFCUBKzUAEV01////6/6hA3cEcwImAFUAAAEHAU3+0PnIABtAEgEAEVARcBG/EQQRAQYREgoJJQErNQARXTUAAAD//wBe/+wEFwdmAiYANgAAAQcBTwECAVIAE0ALAS8FJgEAKCwGACUBKzUAKzUAAAD//wBc/+wDrAYUAiYAVgAAAQcBTwDDAAAADrkAAf/4tCYqFAAlASs1//8AXv5SBBcFywImADYAAAEHAmQEogAAAA65AAH/57QoLAYAJQErNf//AFz+UgOsBHMCJgBWAAABBwJkBG0AAAAOuQAB/+i0JioUACUBKzX//wBe/+wEJQdzAiYANgAAAQcJQwJOAVIAF0ANAgE9BSYCAVkoOgYAJQErNTUAKzU1AAAA//8AXP/sA7wGIQImAFYAAAEHCUMB5QAAAA23AgEmJjgUACUBKzU1AP//AF7/7AQXCDcCJgA2AAABBwlEAjcBUgAZtgIBPgUmAgG4//u0QTsGACUBKzU1ACs1NQD//wBc/+wDrAblAiYAVgAAAQcJRAHuAAAAELECAbj/6LQ/ORQAJQErNTUAAP//AF7+UgQXB2YCJgA2AAAA
JwFPAQIBUgEHAmQEogAAACC0AS8FJgK4/+e1MTUFACUBuP/8tCgsFBolKzUrNQArNQAA//8AXP5SA6wGFAImAFYAAAAnAU8AwwAAAQcCZARtAAAAGbkAAv/ntS8zBgAlAbj//rQmKhQZJSs1KzUA//8AKQAABHkHZgImADcAAAEHAU8BFwFSABNACwEACAwCByUBDwUmACs1ASs1AAAA//8AL//sAzcHCgImAFcAAAEHAU8AYgD2ABtAEQEPGAG/GO8YAhgBCxYaDQ4lASs1ABFdcTUAAAD//wAp/lIEeQW2AiYANwAAAQcCZATRAAAAC7YBAAgMAQAlASs1AAAA//8AL/5SAzcFTAImAFcAAAEHAmQEfQAAAAu2AQAWGggDJQErNQAAAP//ACn+oQR5BbYCJgA3AAABBwFN/+T5yAAbQBIBAAhQCHAIvwgECAEBCwoEBiUBKzUAEV01AAAA//8AL/6hA0UFTAImAFcAAAEHAU3/hPnIAB5ADAEAFlAWcBa/FgQWAbj/9rQWFwgDJQErNQARXTX//wAp/mcEeQW2AiYANwAAAQcBS//c+Y4AMbQBQAsBC7j/wEAUCQtIAAtAC1ALcAuQC6AL8AsHCwG4//i0DhUEBiUBKzUAEV0rcTUA//8AL/5nA5kFTAImAFcAAAEHAUv/evmOAC60AUAYARi4/8BAGgkLSAAYQBhQGHAYkBigGPAYBxgBMxwjCwMlASs1ABFdK3E1//8Arv5yBV4FtgImADgAAAEHAGoAmvl6ACFAFgIBABYgFlAWoBbvFgUWAgECEyUJASUBKzU1ABFdNTUA//8Amv5yBKIEXgImAFgAAAEHAGoAMfl6ACFAFgIBABYgFlAWoBbvFgUWAgEBFScJFCUBKzU1ABFdNTUA//8Arv5ZBV4FtgImADgAAAEHAVIAnPmCAC2xARu4/8CzDhBIG7j/wEAUCQtIABtQG6Ab8BsEGwEDFyMJASUBKzUAEV0rKzUA//8Amv5ZBKIEXgImAFgAAAEHAVIAL/mCADCxARu4/8CzDhBIG7j/wEAOCQtIABtQG6Ab8BsEGwG4//+0GSUJFCUBKzUAEV0rKzUAAP//AK7+ZwVeBbYCJgA4AAABBwFLAJr5jgAutAFAFwEXuP/AQBoJC0gAF0AXUBdwF5AXoBfwFwcXAQAZIAkBJQErNQARXStxNf//AJr+ZwSiBF4CJgBYAAABBwFLADH5jgAutAFAFwEXuP/AQBoJC0gAF0AXUBdwF5AXoBfwFwcXAQAbIgkUJQErNQARXStxNf//AK7/7AVeCF4CJgA4AAABBwlIAwYBUgAXQA0CASUFJgIBASUxCQElASs1NQArNTUAAAD//wCa/+wEogcMAiYAWAAAAQcJSAKgAAAADbcCAQMnMwkUJQErNTUA//8Arv/sBV4INwImADgAAAEHCUUDBgFSACFAFAMCAQ8WHxYCFgUmAwIBARYVCQElASs1NTUAK3E1NTUA//8Amv/sBKIG5QImAFgAAAEHCUUCnAAAABKyAwIBuP//tBgXCRQlASs1NTX//wAAAAAFMwdgAiYAOQAAAQcBUgAvAVIAE0ALARAFJgEDEBwEASUBKzUAKzUAAAD//wAAAAAEjQYOAiYAWQAAAQYBUt4AAAu2AQUQHAEKJQErNQD//wAA/lIFMwW2AiYAOQAAAQcCZAUbAAAAC7YBAAwQAwIlASs1AAAA//8AAP5SBI0EXgImAFkAAAEHAmQEyQAAAAu2AQAMEAALJQErNQAAAP//AAAAAAe8B2YCJgA6AAABBwFPAqQBUgATQAsBAB4iExQlASUFJgArNQErNQAAAP//ABQAAAbFBhQCJgBaAAABBwFPAjEAAAAOuQAB//u0HCAPECUBKzX//wAA/lIHvAW2AiYAOgAAAQcCZAZeAAAADrkAAf//tB4iCx0lASs1//8AFP5SBsUEXgImAFoAAAEHAmQF7AAAAA65AAH//7QcIAYaJQErNf//AAAAAAVWB2YCJgA7AAABBwFPAXEBUgAVtAETBSYBuP//tAwQBAAlASs1ACs1AP//AAoAAASWBhQCJgBbAAABBwFPARcAAAALtgEADBALByUBKzUAAAD//wAAAAAFVgdWAiYAOwAAAQcAagA9AVIAF0ANAgEhBSYCAQAMHgQAJQErNTUAKzU1AAAA//8ACgAABJYGBAImAFsAAAEGAGrkAAANtwIBAgweCwclASs1NQAAAP//AAAAAAT+B2YCJgA8AAABBwFPAUYBUgATQAsBEAUmAQAJDQcCJQErNQArNQAAAP//AAD+FASNBhQCJgBcAAABBwFPAQ4AAAALtgEAFxsACSUBKzUAAAD//wAxAAAEcQdzAiYAPQAAAQcBS//kAVIAE0ALARcFJgEAEBcCCSUBKzUAKzUAAAD//wA3AAADsQYhAiYAXQAAAQYBS5IAAAu2AQ4QFwIJJQErNQD//wAx/lIEcQW2AiYAPQAAAQcCZATRAAAADrkAAf//tAoOAgklASs1//8AN/5SA6oEXgImAF0AAAEHAmQEdQAAAAu2AQMKDgIJJQErNQAAAP//ADH+oQRxBbYCJgA9AAABBwFN/+T5yAAbQBIBAAtQC3ALvwsECwEBDQwCCSUBKzUAEV01AAAA//8AN/6hA6oEXgImAF0AAAEHAU3/gvnIABtAEgEAC1ALcAu/CwQLAQANDAIJJQErNQARXTUAAAD//wCg/qEEqAYUAiYASwAAAQcBTQA3+cgAG0ASAQAYUBhwGL8YBBgBARkYChUlASs1ABFdNQAAAP//AC//7AM3BvoCJgBXAAABBwBq/yQA9gAQsQIBuP/ftBYoCwMlASs1NQAA//8AFAAABsUGsgImAFoAAAEHAVABHwAAAA23AgEAIhwPECUBKzU1AP//AAD+FASNBrICJgBcAAABBgFQ+QAADbcCAQEdFwAJJQErNTUAAAD//wBW/+wEOwZ1AiYARAAAAQcErAKeAAAAC7YCVS4qCBglASs1AAAA//8AoAAAAz8HjwImAUEAAAEHAU8AyQF7ABNACwEUAiYBEg0RBQolASs1ACs1AAAAAAEArv/sBd0FywAjAFJAKxAjIwAIGgccCxcXHBoABCQlCAcEGwgIGWxZCAgEABIEH2lZBAQOFGtZDhMAPysAGD8rABg/EjkvKxEAMxI5ORESARc5ETMRMxEzETMRMzEwMxEQACEyBBcHFhYVFAQhIicRFhYzMjY1NCEjNTcmJiMiBhURrgE/ARvxATEo47S6/un+/d2cULBOhYv+z1jnIoBgmIkDngEIASXUxN8i1anY8EsBBjAsbWrJ1+5FPKCq/IMAAP//AFz/7AUABlgCJgF+AAABBweWAXMAAAALtgIJKzEPGSUBKzUAAAD//wBc/+wFAAZYAiYBfgAAAQcH0AE/AAAADrkAAv/TtDUrDxklASs1//8AXP/sBQAGNQImAX4AAAEGB6UvAAAQsQMCuP/7tDQrDxklASs1Nf//AFz/7AUABjUCJgF+AAABBgeyLwAAELEDArj/87Q0Og8ZJQErNTX//wB
c/+wFAAY1AiYBfgAAAQYHpmAAAA23AwItNC8PGSUBKzU1AAAA//8AXP/sBQAGNQImAX4AAAEGB7NMAAANtwMCHDQ+DxklASs1NQAAAP//AFz/7AUABuECJgF+AAABBgenIwAAELEDArj/+LRDTw8ZJQErNTX//wBc/+wFAAbhAiYBfgAAAQYHtBsAABCxAwK4//C0Q08PGSUBKzU1//8AAAAABYUFzAImACQAAAEHB5b/cP90ABSzAhEEArj/jrQOFAQFJQErNQA/NQAA//8AAAAABYUFzAImACQAAAEHB9D/Y/90ABSzAhsEArj/f7QYDh4FJQErNQA/NQAA//8AAQAABrgFzAAnACQBMwAAAQcHpf7x/5cAFkAMAwIaBAMCSQ4OBAUlASs1NQA/NTUAAP//AAEAAAa4BcwAJwAkATMAAAEHB7L+8f+XABZADAMCGgQDAkkdHQQFJQErNTUAPzU1AAD//wABAAAGuAXMACcAJAEzAAABBwem/vH/lwAWQAwDAhoEAwJLEhIEBSUBKzU1AD81NQAA//8AAQAABrgFzAAnACQBMwAAAQcHs/7x/5cAGLUDAhoEAwK4/160ICAFBSUBKzU1AD81Nf///+MAAAZ7BnwAJwAkAPYAAAEHB6f+8f+bACFAFQMCrykBACkQKT8pAykDAsYyMgQFJQErNTUAEV1xNTUAAAD////jAAAGewZ8ACcAJAD2AAABBwe0/vH/mwAhQBUDAq8pAQApECk/KQMpAwLGMjIEBSUBKzU1ABFdcTU1AAAA//8ATv/sBCUGWAImAYIAAAEHB5YBTgAAAAu2ARcnLRYcJQErNQAAAP//AE7/7AQlBlgCJgGCAAABBwfQATMAAAAOuQAB//q0MScWHCUBKzX//wBO/+wEJQY1AiYBggAAAQYHpQoAAA23AgEJMCcWHCUBKzU1AAAA//8ATv/sBCUGNQImAYIAAAEGB7L3AAAQsQIBuP/utDA2FhwlASs1Nf//AE7/7AQlBjUCJgGCAAABBgemMQAADbcCATEwKhYcJQErNTUAAAD//wBO/+wEJQY1AiYBggAAAQYHsx0AAA23AgEgMDkWHCUBKzU1AAAA//8AAQAABNUFzAAnACgA0wAAAQcHlv9w/3QAFLMBDwQBuP+FtBISAgIlASs1AD81//8AAQAABNUFzAAnACgA0wAAAQcH0P9j/3QAFLMBGQQBuP92tAwMAgIlASs1AD81//8AAQAABisFzAAnACgCKQAAAQcHpf7x/5cAGLUCARgEAgG4/520DAwCAiUBKzU1AD81Nf//AAEAAAYrBcwAJwAoAikAAAEHB7L+8f+XABi1AgEYBAIBuP+dtBsbAgIlASs1NQA/NTX//wABAAAGAgXMACcAKAIAAAABBwem/vH/lwAYtQIBGAQCAbj/yLQQEAICJQErNTUAPzU1////7AAABgIFzAAnACgCAAAAAQcHs/7c/5cAGLUCARgEAgG4/8i0Hx8CAiUBKzU1AD81Nf//AKD+FASoBlgCJgGEAAABBweWAbQAAAALtgEoHxsLFCUBKzUAAAD//wCg/hQEqAZYAiYBhAAAAQcH0AFqAAAADrkAAf/rtB8cCxQlASs1//8AoP4UBKgGNQImAYQAAAEGB6VCAAAQsQIBuP/ttCgdCxQlASs1Nf//AKD+FASoBjUCJgGEAAABBgeyQgAAELECAbj/7bQeLAsUJQErNTX//wCg/hQEqAY1AiYBhAAAAQYHpnMAAA23AgEeKBkLFCUBKzU1AAAA//8AoP4UBKgGNQImAYQAAAEGB7NeAAANtwIBFB4oCxQlASs1NQAAAP//AKD+FASoBuECJgGEAAABBgenRgAADbcCAQEtOQsUJQErNTUAAAD//wCg/hQEqAbhAiYBhAAAAQYHtDEAABCxAgG4/+20LTkLFCUBKzU1//8AAQAABjkFzAAnACsA0wAAAQcHlv9w/3QAFLMBDwQBuP+FtBISBgYlASs1AD81//8AAQAABjkFzAAnACsA0wAAAQcH0P9j/3QAFLMBGQQBuP92tAwMBgYlASs1AD81//8AAQAAB48FzAAnACsCKQAAAQcHpf7x/5cAGLUCARgEAgG4/520DAwGBiUBKzU1AD81Nf//AAEAAAePBcwAJwArAikAAAEHB7L+8f+XABi1AgEYBAIBuP+dtBsbBgYlASs1NQA/NTX//wABAAAHZgXMACcAKwIAAAABBwem/vH/lwAYtQIBGAQCAbj/yLQQEAYGJQErNTUAPzU1////7AAAB2YFzAAnACsCAAAAAQcHs/7c/5cAGLUCARgEAgG4/8i0Hx8GBiUBKzU1AD81Nf///84AAAdeBnwAJwArAfgAAAEHB6f+3P+bACRADwIBrykBACkQKT8pAykCAbj/+bQwMAYGJQErNTUAEV1xNTX////OAAAHXgZ8ACcAKwH4AAABBwe0/tz/mwAkQA8CAa8pAQApECk/KQMpAgG4//m0MDAGBiUBKzU1ABFdcTU1//8AoP/sAxcGWAImAYYAAAEGB5YzAAALtgEbDhQNACUBKzUA//8Anv/sAxcGWAImAYYAAAEGB9AAAAAOuQAB/+a0GA4NACUBKzUAAP////3/7AMXBjUCJgGGAAABBwel/u0AAAANtwIBAyEODQAlASs1NQD////w/+wDFwY1AiYBhgAAAQcHsv7gAAAAELECAbj/9rQXHQ0AJQErNTUAAP//ACz/7AMXBjUCJgGGAAABBwem/xwAAAANtwIBMyESDQAlASs1NQD//wAD/+wDFwY1AiYBhgAAAQcHs/7zAAAADbcCARUXIQ0AJQErNTUA////1v/sAxcG4QImAYYAAAEHB6f+5AAAAA23AgEIDhQNACUBKzU1AP///87/7AMXBuECJgGGAAABBwe0/twAAAAQsQIBuP/6tBcODQAlASs1NQAA//8AAQAABAgFzAAnACwBLQAAAQcHlv9w/3QAFLMBDwQBuP+htBISBQUlASs1AD81//8AAQAAA/wFzAAnACwBIQAAAQcH0P9j/3QAFLMBGQQBuP+etAwMBQUlASs1AD81//8AAQAABTkFzAAnACwCXgAAAQcHpf7x/5cAGLUCARgEAgG4/960FBQFBSUBKzU1AD81Nf//AAEAAAU5BcwAJwAsAl4AAAEHB7L+8f+XABi1AgEYBAIBuP/etCMjBQUlASs1NQA/NTX//wABAAAFTgXMACcALAJzAAABBwem/vH/lwAYtQIBGAQCAbj/y7QQEAUFJQErNTUAPzU1//8AAQAABWIFzAAnACwChwAAAQcHs/7x/5cAGLUCARgEAgG4/8y0Hx8FBSUBKzU1AD81Nf///84AAAUpBnwAJwAsAk4AAAEHB6f+3P+bACFAFQIBrykBACkQKT8pAykCARkwMAYGJQErNTUAEV1xNTUAAAD////OAAAFKQZ8ACcALAJOAAABBwe0/tz/mwAhQBUCAa8pAQApECk/KQMpAgEZMDAGBiUBKzU1ABFdcTU1AAAA//8AXP/sBJgGWAImAFIAAAEHB5YBdQAAAA
u2AhMkIBMMJQErNQAAAP//AFz/7ASYBlgCJgBSAAABBwfQAT8AAAAOuQAC/+q0JCETDCUBKzX//wBc/+wEmAY1AiYAUgAAAQYHpSsAAA23AwIALSITDCUBKzU1AAAA//8AXP/sBJgGNQImAFIAAAEGB7IrAAANtwMCACMxEwwlASs1NQAAAP//AFz/7ASYBjUCJgBSAAABBgemUgAADbcDAictHhMMJQErNTUAAAD//wBc/+wEmAY1AiYAUgAAAQYHsz0AAA23AwIdIy0TDCUBKzU1AAAA//8AAf/sBnYFzQAnADIAjwAAAQcHlv9w/3QAEkAKAhkEAgocHAYGJQErNQA/NQAA//8AAf/sBokFzQAnADIAogAAAQcH0P9j/3QAFLMCIwQCuP/2tB0dBgYlASs1AD81//8AAf/sB+0FzQAnADICBgAAAQcHpf7x/5cAFkAMAwIiBAMCABYWBgYlASs1NQA/NTUAAP//AAH/7AftBc0AJwAyAgYAAAEHB7L+8f+XABZADAMCIgQDAgAlJQYGJQErNTUAPzU1AAD//wAB/+wHkQXNACcAMgGqAAABBwem/vH/lwAYtQMCIgQDArj/jrQdHQYGJQErNTUAPzU1//8AAf/sB6UFzQAnADIBvgAAAQcHs/7x/5cAFkAMAwIiBAMCYCgoBgYlASs1NQA/NTUAAP//AI//7gS8BlgCJgGSAAABBweWAY8AAAALtgECIBwEEyUBKzUAAAD//wCP/+4EvAZYAiYBkgAAAQcH0AFQAAAADrkAAf/QtCAdBBMlASs1//8Aj//uBLwGNQImAZIAAAEGB6VEAAAQsQIBuP/ttCkeBBMlASs1Nf//AI//7gS8BjUCJgGSAAABBgeyRAAAELECAbj/7bQfLQQTJQErNTX//wCP/+4EvAY1AiYBkgAAAQYHpnUAAA23AgEfKRoEEyUBKzU1AAAA//8Aj//uBLwGNQImAZIAAAEGB7NgAAANtwIBFB8pBBMlASs1NQAAAP//AI//7gS8BuECJgGSAAABBgenSAAADbcCAQIuOgQTJQErNTUAAAD//wCP/+4EvAbhAiYBkgAAAQYHtDMAABCxAgG4/+20LjoEEyUBKzU1//8AAQAABkwFzAAnADwBTgAAAQcH0P9j/3QAFLMBFgQBuP+ztAkJBwclASs1AD81//8AAQAAB40FzAAnADwCjwAAAQcHsv7x/5cAGLUCARUEAgG4/5u0Hh4HByUBKzU1AD81Nf//AAEAAAeiBcwAJwA8AqQAAAEHB7P+8f+XABi1AgEVBAIBuP/xtBsbBwclASs1NQA/NTX////jAAAHogZ8ACcAPAKkAAABBwe0/vH/mwAhQBUCAa8pAQApECk/KQMpAgEaLS0HByUBKzU1ABFdcTU1AAAA//8Abf/sBnsGWAImAZYAAAEHB5YCbwAAAAu2ARMyLgolJQErNQAAAP//AG3/7AZ7BlgCJgGWAAABBwfQAjkAAAAOuQAB/+q0Mi8KJSUBKzX//wBt/+wGewY1AiYBlgAAAQcHpQElAAAADbcCAQA7MAolJQErNTUA//8Abf/sBnsGNQImAZYAAAEHB7IBEAAAABCxAgG4/+u0MT8KJSUBKzU1AAD//wBt/+wGewY1AiYBlgAAAQcHpgFgAAAADbcCATs7LAolJQErNTUA//8Abf/sBnsGNQImAZYAAAEHB7MBTAAAAA23AgEyMTsKJSUBKzU1AP//AG3/7AZ7BuECJgGWAAABBwenASkAAAANtwIBFEBMCiUlASs1NQD//wBt/+wGewbhAiYBlgAAAQcHtAEAAAAAELECAbj/7LRATAolJQErNTUAAP//AAEAAAbCBc0AJwF2ALAAAAEHB5b/cP90ABSzASQEAbj/87QnJw0NJQErNQA/Nf//AAEAAAbXBc0AJwF2AMUAAAEHB9D/Y/90ABSzAS4EAbj/3bQoKA0NJQErNQA/Nf//AAEAAAg5Bc0AJwF2AicAAAEHB6X+8f+XABi1AgEtBAIBuP/qtCEhDQ0lASs1NQA/NTX//wABAAAIOQXNACcBdgInAAABBwey/vH/lwAYtQIBLQQCAbj/6rQwMA0NJQErNTUAPzU1//8AAQAAB90FzQAnAXYBywAAAQcHpv7x/5cAGLUCAS0EAgG4/3e0KCgNDSUBKzU1AD81Nf//AAEAAAfxBc0AJwF2Ad8AAAEHB7P+8f+XABi1AgEtBAIBuP98tDc3DQ0lASs1NQA/NTX////OAAAHhQZ8ACcBdgFzAAABBwen/tz/mwAkQA8CAa8pAQApECk/KQMpAgG4/820JycNDSUBKzU1ABFdcTU1////zgAAB4UGfAAnAXYBcwAAAQcHtP7c/5sAJEAPAgGvKQEAKRApPykDKQIBuP/HtCIiDQ0lASs1NQARXXE1Nf//AFz/7AUABiECJgF+AAABBgfEIQAADrkAAv+7tC8rDxklASs1AAD//wBc/+wFAAYhAiYBfgAAAQYHz1oAAAu2AkozLw8ZJQErNQD//wBO/+wEJQYhAiYBggAAAQYHxPkAAA65AAH/xrQrJxYcJQErNQAA//8ATv/sBCUGIQImAYIAAAEGB88xAAALtgFULysWHCUBKzUA//8AoP4UBKgGIQImAYQAAAEGB8QvAAAOuQAB/6+0Gh0LFCUBKzUAAP//AKD+FASoBiECJgGEAAABBgfPaAAAC7YBPh0ZCxQlASs1AP//ADv/7AMXBiECJgGGAAABBwfE/sYAAAALtgEaDw4NACUBKzUAAAD//wCg/+wDFwYhAiYBhgAAAQcHz/8BAAAADrkAAf/atBYVDQAlASs1//8AXP/sBJgGIQImAFIAAAEGB8QEAAAOuQAC/660HyITDCUBKzUAAP//AFz/7ASYBiECJgBSAAABBgfPPQAAC7YCPSIeEwwlASs1AP//AI//7gS8BiECJgGSAAABBgfEJwAADrkAAf+mtBseBBMlASs1AAD//wCP/+4EvAYhAiYBkgAAAQYHz2AAAAu2ATQeGgQTJQErNQD//wBt/+wGewYhAiYBlgAAAQcHxAD+AAAAC7YBFykoFxglASs1AAAA//8Abf/sBnsGIQImAZYAAAEHB88BNwAAAA65AAH/1bQwLxcYJQErNf//AFz+FAUABlgCJgF+AAAAJweWAXMAAAEGB5c5AAAXuQAD/+9ADEdBDyIlAgkrMQ8ZJSs1KzUA//8AXP4UBQAGWAImAX4AAAAnB9ABPwAAAQYHlzkAABm5AAP/77VGQg8iJQK4/9O0NSsPGSUrNSs1AAAA//8AXP4UBQAGNQImAX4AAAAmB6UvAAEGB5c5AAAbuQAE/++2TkoPIiUDArj/87Q9Kw8ZJSs1NSs1AAAA//8AXP4UBQAGNQImAX4AAAAmB7IvAAEGB5c5AAAbuQAE/++2TkoPIiUDArj/87Q0Og8ZJSs1NSs1AAAA//8AXP4UBQAGNQImAX4AAAAmB6ZgAAEGB5c5AAAZuQAE/+9ADU9JDyIlAwIlPS4PGSUrNTUrNQD//wBc/hQFAAY1AiYBfgAAACYHs
[... large run of base64-encoded binary data omitted — appears to be embedded binary tables (likely font glyph/hinting data) bundled inside the JavaScript chunk; not human-readable ...]
NxjIxMAE0NjMyFhUUBiMiJgU1NjY3IRUGBgf+tEY7OUpKOTtGAQ8cRWsBSEDTRQVzPz4+Pzw/P14bIGKrFUm7LwAAAAAC/o0E2QFvBuUACwAZACNAEwMJCQ8RcAwBAwwBDIAPFl8WAhYAL10azF1dMjk5LzMxMAM0NjMyFhUUBiMiJgczFhc2NzMVBgchJiYngUY7OUpKOTtG8pR5ZmB7lJpS/vojXXAGaD8+Pj88Pj4LUGllVBuqgzZ4fwAD/q4E2QFUBuUAAwAPABsAL0AdEwcHGR8NLw0CDQgDAXkDAVgDaAMCDQMPAK8AAgAAL10yMl1dcS9dMzMRMzEwASEVIRM0NjMyFhUUBiMiJiU0NjMyFhUUBiMiJv6uAqb9WgZGOzlKSjk7RgGURjs5Sko5O0YFmL8Bjz8+Pj88Pj48Pz4+Pzw+PgAAAAP+ngTXAWQG5QALABcALwA9QCoVAw8DCUARF0gJuR3JHQIdLAkDHyQvJD8kAyS2KcYpAiEpJAMPGF8YAhgAL10XM10vXRczXS8rFzMxMAE0NjMyFhUUBiMiJiU0NjMyFhUUBiMiJhMiLgIjIgYHIzY2MzIeAjMyNjczBgb+tEY7OUpKOTtGAZRGOzlKSjk7RkUrTUU9HComDH0Jb10wT0Q8HSkmCX0LbwZoPz4+Pzw+Pjw/Pj4/PD4+/qsbIRwqLmuBHCEcLSxyegAAAAL+ngTXAWQHDAAIACAAV0AQkAQBoAQBBICPAJ8ArwADALj/wEArCQxIALkOyQ7ZDgMOHQAD0BUBHxUvFT8VAxW2GsYa1hoDGhIVAw8JXwkCCQAvXRczXS9dXRczXS8rXRrNXXExMAMmJic1IRYXFQMiLgIjIgYHIzY2MzIeAjMyNjczBgYdSbFBAUxWZiMrTUU9HComDH0Jb10wT0Q8HSkmCX0LbwX2Mo9BFIpyGv7hGyEcKi5rgRwhHC0scnoAAAL+ngTXAWQHDAAIACAAV0AQkAQBoAQBBICPAJ8ArwADALj/wEArCQxIALkOyQ7ZDgMOHQAD0BUBHxUvFT8VAxW2GsYa1hoDGhIVAw8JXwkCCQAvXRczXS9dXRczXS8rXRrNXXExMAM1NjchFQYGBxMiLgIjIgYHIzY2MzIeAjMyNjczBgawZlYBTEGySHArTUU9HComDH0Jb10wT0Q8HSkmCX0LbwX2GnKKFEGQMf7hGyEcKi5rgRwhHC0scnoAAAL+rgTZAVQHDAAJAA0AM0AhkASgBAIEgE8AAQAAAQAIDQF5DQFYDWgNAgANDwqvCgIKAC9dMzNdXXEvXV0azV0xMAMmJic1IRYWFxUFIRUhBEnWLwFMHn0y/ecCpv1aBeMyszAUMqY2G0u/AAL+rgTZAVQHDAAJAA0AM0AhkASgBAIEgE8AAQAAAQAIDQF5DQFYDWgNAgANDwqvCgIKAC9dMzNdXXEvXV0azF0xMAM1NjY3IRUGBgcFIRUhxT5zHAFMMclU/qgCpv1aBeMbRpkvFDGpO0u/AAP+iwTLAXUHDgALABcAJgA/QCkbHWAYAYAYAZAYoBgCGICPI/8jAgAjICMwIwMjIw8DAxUPCV8JrwkDCQAvXTMzETMzL11dGsxdcXIyOTEwATQ2MzIWFRQGIyImJTQ2MzIWFRQGIyImATMWFzY3MxUGBgchJiYn/rRGOzlKSjk7RgGURjs5Sko5O0b+Q46SVU6ZjoBRG/7uMIA8BUY/Pj4/PD8/PD8+Pj88Pz8CBEpNRVIagF0nRYQ7AAAAA/6uBMsBVAbXAAMADwAbACNAFANPAI8AAgAAEwcHGQ8NXw2vDQMNAC9dMzMRMzMvXTIxMAEhFSEXNDYzMhYVFAYjIiYlNDYzMhYVFAYjIib+rgKm/VoGRjs5Sko5O0YBlEY7OUpKOTtGBte+0z8+Pj88Pz88Pz4+Pzw/PwAAAAAC/p4E2QFkBtcAFwAbAD9ALGgbeBsCGx8YAY8YnxgCMBgBGAUUGAMfDC8MPwy/DM8MBQwRCQwDDwBfAAIAAC9dFzIvXRczL11dcTNdMTATIi4CIyIGByM2NjMyHgIzMjY3MwYGASEVIY0rTUU9HComDH0Jb10wT0Q8HSkmCX0Lb/3EAqb9WgTZGyEcKS9rgRwhGywscnoB/r4AAAAC/q4EywFUBtcAAwAPAC1AHmgDeAMCA08AAQ8APwBPAI8ABAAABwcPDV8Nrw0DDQAvXTMRMy9dcTJdMTABIRUhFzQ2MzIWFRQGIyIm/q4Cpv1a0UY7OUpKOTtGBte+0z8+Pj88Pz8AAAL+rgYdAVQIKQADAA8AM0AhfQMBBWgDAQNPAAGPAAFPAAEAAAcHDw0vDT8Nbw1/DQUNAC9dMxEzL11dcTJdX10xMAEhFSEXNDYzMhYVFAYjIib+rgKm/VrRRjs5Sko5O0YIKb/SPz09Pzw/PwAAAAAEAD0EtgMSB74ACwAXAB8ALQBsQEYGABwfEgwgDB8AJgUuL98aARqAH0ARFkgAHxAfIB8DHx8jLXAm0CYCJiYqHyN/I48jAyMjDwMDFQ8JLwlfCX8JrwnPCQYJAC9dMzMRMzMvXTMzL10zEjkvXSsazF0REgEXOREzETMRMzEwEzQ2MzIWFRQGIyImJTQ2MzIWFRQGIyImAzY3MxUGByMlBgYjIiYnMxYWMzI2N1xGOzlKSjk7RgGURjs5Sko5O0asUiLeXHODAc4Lw6CntwmWCHNYWHEKBTE/Pj4/PD8/PD8+Pj88Pz8CAX9JFG5hTp60raVXU1xOAAQAPQS2AxIHvgALABcAHwAtAGxARgYAGBsSDCAMGwAmBS4v3xwBHIAZQBEWSAAZEBkgGQMZGSMtcCbQJgImJiofI38jjyMDIyMPAwMVDwkvCV8JfwmvCc8JBgkAL10zMxEzMy9dMzMvXTMSOS9dKxrNXRESARc5ETMRMxEzMTATNDYzMhYVFAYjIiYlNDYzMhYVFAYjIiYTIyYnNTMWFyUGBiMiJiczFhYzMjY3XEY7OUpKOTtGAZRGOzlKSjk7RhyDc1zeKkoBBgvDoKe3CZYIc1hYcQoFMT8+Pj88Pz88Pz4+Pzw/PwHmYW4UWHAznrStpVdTXE4ABABWBLYC/AfJAAMADwAbACQAfkBWCgEEICQAFhYQECQEAyUm3x4BHoA/JG8kfySPJAQkaAF4AQIkAVkCAUkCATgCAR8CTwJfAs8C3wIFHwKPAp8C3wIEAgITBwcZDw0vDV8Nfw2vDc8NBg0AL10zMxEzMy9dcV1dXTMzXS9dGsxdERIBFzkRMxEzETMRMzMxMAEhNSEBNDYzMhYVFAYjIiYlNDYzMhYVFAYjIiYDNjczFQYGByMC/P1aAqb9YEY7OUpKOTtGAZRGOzlKSjk7RqZFL94dezeDBe6+/oU/Pj4/PD8/PD8+Pj88Pz8CC2VkFSN+LgAAAAAEAFYEtgL8B8kAAwAPABsAJAB+QFYKAwQcIAIWFhAQIAQDJSbfIQEhgD8dbx1/HY8dBB1oA3gDAh0DWQABSQABOAABHwBPAF8AzwDfAAUfAI8AnwDfAAQAABMHBxkPDS8NXw1/Da
8Nzw0GDQAvXTMzETMzL11xXV1dMjJdL10azV0REgEXOREzETMRMxEzMzEwEyEVIRc0NjMyFhUUBiMiJiU0NjMyFhUUBiMiJhMjJiYnNTMWF1YCpv1aBkY7OUpKOTtGAZRGOzlKSjk7RhiDN3oe3S9GBqy+vT8+Pj88Pz88Pz4+Pzw/PwHwLn0kFWBpAAAAAwBKBKQDGwc/AA0AHAAlAGJAQSELEBcLJSUXAwMmJxAaH4AUJUAKE0glJQofAwEPAx8DLwN/A48DnwMGAwMH/wABDwAvAF8AfwCfAK8AzwDvAAgAAC9dcjIyL11xMzMvK8Ya3MQyERIBFzkRMxEzETMxMAEiJiczFhYzMjY3MwYGAxQHFBYXFSYmNTQ2MzIWEzY3IRUGBgcjAaygsw+dE1hYWVoQmQ+0wGo/OXeTRkE1QDk3HQELIHY3kgSkiIQxMzIygooCOWAEIywFVgKAXz5RMv7vpYoUQ69CAAMASgSkAwYHPwANABwAJgBgQEEUAxAXCx0hFwMFJygQGiKAFB5AChNIHh4KHwMBDwMfAy8DfwOPA58DBgMDB/8AAQ8ALwBfAH8AnwCvAM8A7wAIAAAvXXIyMi9dcTMzLyvGGt3EMhESARc5ETMRMzEwASImJzMWFjMyNjczBgYDFAcUFhcVJiY1NDYzMhYBIyYmJzUhFhYXAaygsw+dE1hYWVoQmQ+022pAOXeTRkE0QAGBkTV5HwELBzEbBKSIhDEzMjKCigI5YAQjLAVWAoBfPlEy/tY/tUAUKLVSAAAAAwBKBKQDHQc/AA0AFgAlAGRAQxILHSQLFhYkFyADBSYnJBoQgCAWQAoTSBYWCh8DAQ8DHwMvA38DjwOfAwYDAwf/AAEPAC8AXwB/AJ8ArwDPAO8ACAAAL11yMjIvXXEzMy8rxBrcxDIREgEXOREzETMRMzEwASImJzMWFjMyNjczBgYDNjchFQYGByMlNDYzMhYVFAYHNTY2NSYBrKCzD50TWFhZWhCZD7SFNx0BCx+BMY7+7kA1QUaTdzlAawSkiIQxMzIygooBWKWKFEG+NfowMlE+YH8CVgUsIwQAAAADADkEpAL2Bz8ADAAVACQAXkBBHCMKDREjFh8CByUmIxkSgB8OQAoTSA4OCh8DAQ8DHwMvA38DjwOfAwYDAwb/AAEPAC8AXwB/AJ8ArwDPAO8ACAAAL11yMjIvXXEzMy8rxBrdxDIREgEXOREzMTABIAMzFhYzMjY3MwYGEyMmJic1IRYXJTQ2MzIWFRQGBzU2NjUmAZz+wCOeE1hYWVoPmg+ypJE1eR8BCh03/ZFBNEFGk3c5QGsEpAEMMTMxM4CMAT8/tUAUiqXhMDJRPmB/AlYFLCMEAAABABD/7AgjBbYAGwBLQCcaAAAdGAUbAgIFBA4EHB0FAhgCGAQWGQMBBBIWB2lZFgMMEWlZDBMAPysAGD8rABg/Mz8REjk5ERI5ERIBFzkRMxEzETMRMzEwISEBASEBASMHAgIGIyInNRYzMjYSEhMhAQEhAQgj/p7+rP6s/rQB5f7f7xA+X7OeVEA6MzU+N1sgAqABOwE1AU7+NQIp/dcC8gHChv4B/mWoFv4UYQEHAlcBC/3yAg79KwAAAAEAAP/sBwIEXgAZAEdAJQgUERcTFRUXABkEGhsAFxEXERkPEg8WGRUPAmBZDw8GC2FZBhYAPysAGD8rABg/Mz8REjk5ERI5ERIBFzkRMxEzMzIxMAEDIwICBiMiJzUWMzI2EhMhExMhAQEhAwMhA/LR4iBcmXxqRDExOU09FgKo2dwBWv6TAX3+puzr/qYCOwE+/on+j6Ug9BSkAX8BT/6cAWT93f3FAX/+gQACALgAAAbwBbYADgAXAGBANQsNDRkPBAQFCRMMAAATBQMYGQkMAAMDBgMPaVlQAwEPAx8DAgkDAwMFBgoDDgUSBhdpWQYDAD8rABg/Mz8REjkvX15dXSsREgAXORESARc5ETMRMxEzETMRMxEzMTABBiEjESERISATEyEBASEBMzI2NTQmIyMEDoz+8YX+ygHTAeYz2wFO/jUB7v6d/GFmj453f40Cf3f9+AW2/owBdP0r/R8DBnFsbWgAAAIAoP4UBtkEcwAYACQAUkAsExYKAwMHEBYWIhIUFCIHAyUmCgIWEAQTDREIDxUVBxsNGV1ZDRAAIF1ZABYAPysAGD8rABg/Pz8zERIXORESARc5ETMRMxEzETMRMxEzMTAFIicjFhURIREzFzM2MzIWFxMhAQEhAwYGAyIGBxUUFjMyETQmAwbFcBAQ/s/4Kw5r0pjPKdEBWv6TAX3+pt8l1/hxaAJrdM1lFI+MFv47BkqRprytAVT93f3FAWi0yAOTi6AhtJwBUqWlAAL/9gAABqQFtgAVAB4ApUBjDg8aGhIDBwceCgEJCQUKEgQfIAMGaVlGAwHWAwESAwEDIQMBsQMBBEwDAaMDAQMeDEkZAwEDDwOPAwIJBgMDChUPDB0dDGlZAB0QHQIQAx0dChUOEgIXFRdpWRUDCgdpWQoSAD8rABg/KxEAMxg/ERI5L19eXSsREgA5ERI5GC9fXl1fXStdXV9dcV9xXXErERIBFzkRMxEzMxEzETMRMzIxMAEVIREhFSERIREhESMBIQEmJjU0JCEXIyIGFRQWMzMGpP3rAfD+EAIV/MDA/qr+qAGgfIQBHQELppl4hICEkQW2/v6//v6H/wACMf3PAoMy0Y7J2f5WZGFwAAAAAwAA/+wG5QRzACAAKQAwAJ1AWgYHIR4WIQoPJRcDFi4uAyUKBDEyDwANDxcBDQUtF2ZZpS0BaS0BDC0cLQIQAy0tABIHBCQkBGJZYCSgJAIDJCQNBhUPKgEMBhIqX1kSEA0nYlkNDwAaYFkAFgA/KwAYPysAGD8rAF9eXRg/EjkvX10rERIAORESORgvX15dXV0rAF9eXRESORESARc5ETMRMxEzETMRMxEzMjEwBSIkJyMDIQEmJjU0NjMhFTY2MzISFRUhFhYzMjY3FQYGARQWMzMRIyIGJSIGByEmJgUd7P7eIaj8/rYBLWxv89IB6zCXUNH7/S8FkIJltGJQtvvdblmq0UtVA3VhbggBrAJyFOTQ/mABui2qc6K4UjI1/u/rlIKSKi7sKCcDFkZPARpJXHtxcXsAAQC4AAAFYAXLABIAQkAhDQAAFA8OEggHAwMECwkOCAgJBAMTFBIHAgMEDAUDAQQSAD8zPzMSFzkREgEXOREzETMRMxEzETMRMxEzETMxMCEhAREhESERASc3FzchARcHJwcFYP6g/e7+ygE2ASDEk6pzAUr++eyW0JQC5f0bBbb9PAGBz4mwm/6m9onZvgAAAQCgAAAE9ARzABIAQ0AiAhQPEQENCQUFBg0LEAoRAAAKCwYEExQBBAkDBg4HDwMGFQA/Mz8zEhc5ERIBFzkRMxEzETMRMxEzETMRMxEzMTABBwEhAREhESEREyc3FzchAxcHA3FfAeL+pv43/s8BMfB7i2ZGAVDksYwCtnD9ugI3/ckEXv3hATCBg21Y/u65gwAAAAEAEP4ACIUFtgAsA
GRAPA0AGRkaBhMTGiMDLS4KEGpZCgIWallQAmACgALAAtAC8AIGDwIBCwMCAhorCiMaEiscaVkrAyEmaVkhEwA/KwAYPysAGD8/ERI5L19eXV0rABgvKxESARc5ETMRMxEzMzEwATYzMgQSFRQCBiMiJicRFjMyNjU0JiMiBxEhESEHAgIGJyInNRYzMjYSEhMhBPprlcMBMJiL+5lui0qBhYyh5vc1Sv7L/t0QPl+2m1RAOjM1PjdbIANYAycMrP7Ry8P+16EWGQEQL86vxMgN/fQEtIb+Af5jqAIW/hRhAQcCVwELAAABAAD+Cga6BF4AKABUQC4gCwAXFxgFEhgSKSoBFmFZDwEfAQILAwEBJxgVJxpgWScPHiNhWR4WCQ9hWQkcAD8rABg/KwAYPysAGD8SOS9fXl0rERIBOTkRMxEzETMzMjEwATcyFhIVFAIGIyInERYWMzI2NTQmIyMRIREjAgIGIyInNRYzMjYSEyEEXlCX8YR64JOOci15MXR+m5UI/s/uIFyZfGpEMTE5TT0WAyMCfQSN/v2ts/7+hTMBBxgepZWVnP6HA3n+if6PpSD0FKQBfwFPAAEAuP4ACNMFtgAkAKtAag0ZIR0dHgAZGSIaBhMTGh4DJSYKEGpZCgAhAQwGIRxpWUYhAdYhARIhAQMhIQGxIQEEoyEBTCEBOyEBGSEBAw8hjyECCQYhIR4fAhZqWRACAXACgAKgAuAC8AIFLwIBAgIeHwojHwMaHhIAPzM/My8REjkvXV1xKxESADkYL19eXV9dXV1dX11xX3FdcSsAX15dGC8rERIBFzkRMxEzMxEzETMRMxEzMTABNjMyBBIVFAIGIyImJxEWMzI2NTQmIyIHESERIREhESERIREhBUhqlsMBMJiL+5lui0qBhYui5vc1Sv7K/dz+ygE2AiQBNgMnDKz+0cvD/tehFhkBEC/NsMTIDf30Anf9iQW2/cMCPQAAAAABAKD+CgcIBF4AIgCDQE4LFx8bGxwAFxcgGAUSEhgcAyMkHxpgWR8kHyBIuh/KHwJmH/YfAgMfJA1JDx8BCgYfHxwdARZhWQ8BHwECCwMBARwhHQ8YHBUJD2FZCRwAPysAGD8zPzMSOS9fXl0rERIAORgvX15dK19dXSsrERIBFzkRMxEzMxEzETMRMxEzMTABNzIWEhUUAgYjIicRFhYzMjY1NCYjIxEhESERIREhESERIQSsUJfxhHrgk45yLXkxdH2ckwj+z/5W/s8BMQGqATECfQSN/v2ts/7+hTMBBxgeo5eXmv6HAc3+MwRe/lIBrgAAAQC4/lYGaAW2AAsANkAcBwgLBAECAgQIAwwNCBICIgkGaVkJAwQLalkEEgA/KwAYPysAGD8/ERIBFzkRMxEzETMxMAERIREhESERIREhEQZo/tX+y/3m/soEhQEK/UwBqgS0+0wFtvtUAAAAAQCg/m8FqgReAAsAN0AcBwgLBAECAgQIAwwNAgQIFQkGYFkJDwQLX1kEFQA/KwAYPysAGD8QxhESARc5ETMRMxEzMTAlESERIREhESERIREFqv7u/s7+a/7PA/jf/ZABkQN5/IcEXvyBAAAAAQC4/lYGkQW2ABcAPkAhEAwMDRcEAQICBA0DGBkTCGlZExMEDgMNEgIiBBdqWQQSAD8rABg/Pz8SOS8rERIBFzkRMxEzETMRMzEwAREhESERNCYjIgYHESERIRE2NjMyFhURBpH+1f7LYnVPp3b+ygE2k9ZbzuYBCv1MAaoCBGprICr9cQW2/cszJ8e4/q4AAQCg/m8FugYUABkAREAjEg8LCwwZBAECAgQMAxobEhUCBA0ADBUVB11ZFRAEGV9ZBBUAPysAGD8rABg/PxDGEjkREgEXOREzETMRMxEzMzEwJREhESERNCMiBhURIREhERQHBzM2MzIWFREFuv7u/s+0gHL+zwExBwcQZt7FzN/9kAGRAo3yrsP98gYU/sMliVqk1Mb+BgABAK4EoAPuBaQADQA8QCUMAQ4PBQkJAwcLgEsNuw0Cew2rDbsNyw3rDQUNQAoOSA8NAQINAC9fXStdcRrNMjI5ETMREgE5OTEwARUHIycjByMnIwcjJzUD7lI4MZkyNzGaMTdQBaRYrGZmZmasWAAAAQAK/+wEYgW2ABsAdUBLBQYPExYZAgULDRELABcGBh0cAAEaDA8OBg0bGBYZEBMSBhEXABtAGwILAA0QDUANUA0EDAMGDREbFxcbEQ0GBQkUBgkCc1kJGQsYAD8/KwAYPxIXOS8vLy8vX15dXl0REhc5ERIXORESARc5ERczETMxMAEFETY2NSEQACEiJxEHNTc1BzU3ESEVJRUFFSUDO/7BrKIBGP6O/ptuTMfHx8cBKwE//sEBPwNKb/4KHOfl/qT+exQCc0SyRH9EskQBYPpvsm9/bwAAAgDBAAAECgW2AAMABwAAEyERITchESHBA0n8t2gCef2HBbb6SmgE5gAAAAAD/KgE+gCLBywACwAZABoAAAEyFhUUBiMiJjU0NgUGBiMiJic3FhYzMjY3Af6eMUdIMDBIRwIeMvHIsP5K0jCQa2t7H/6fByxGMjJGRjIyRkvS0NLMT5qOip79zgAAAAAC/jwE+v9oBsgACwAMAAABMhYVFAYjIiY1NDYD/tI/V1c/P1dXSgbIV0JCV1dCQlf+MgAAAwCbAFkBxwT6AAsAFwAYAAABMhYVFAYjIiY1NDYTMhYVFAYjIiY1NDYTATE/V1c/P1dXPz9XVz8/V1c/BBZXQkJXV0JCV/11V0JCV1dCQlcDbwD//wAoAAAG4AcsAiYJcgAAAQcJsQbDAAAAAAAAAAEAKAAABuAFDwAyAAABNjMyFhUUBxYzMjY3NSM1IRUjESERBgcWFRQGIyImJic3FhYzMjU0JwYHJzY2NTQjIgcBRaq7p8F7J0ZJf0CCAlbB/u1bkArBsoXX0GTsV8N1eEJFVQtrZIZohAS4V7CPnWAPPELW4uL76AI7NQkqIpWhYv/tWuLfdmFHDwLYBjpDZEMAAAABACgAAAk7BQ8ANgAAATYzMhYVFAcWMzI2NzUjNSEVIxEhESERIREGBxYVFAYjIiYmJzcWFjMyNTQnBgcnNjY1NCMiBwFFqrunwXsnRkl/QIIEscH+7f64/u1bkArBsoXX0GTsV8N1eEJFVQtrZIZohAS4V7CPnWAPPELW4uL76AQY++gCOzUJKiKVoWL/7Vri33ZhRw8C2AY6Q2RDAAAAAgAA/vsEPAT6ADIAMwAAASYmNTQ2NjMzNSE1IRUjESEiBgYVFBc2MzIWFRQGBxYXByYnJiY1NDYzMhczMjY1NCMiEwF/ga1PkZuK/aoEPNP+aTgnFkJdZ6fKlYh3jVz86It4SzRnZg12eYxVdwF+Ja5hTW05c+Li/sMLFhA0JB6qjGmbHTIpw2ahCFFRPEleQjlWA1YAAP//AAD++wQ8BywCJgl0AAABBwoPBA0AAAAAAAAAAgAAAAEEygT6ACMAJAAAARQGIyIAAzcSFjMyNTQmJwYHJzY2NTQnITUhFSMWFRQGBxYWAQRT
yb7p/suG7GrEdYw+TD9IDId2Pv0bBMq2IkpKWFn+sgE+mqMBPgE/Wv787H0/aDYMA9kER0w/NuLiPUlPfjBTqwNjAAAAAAIAAP//Br0E+gA0ADUAAAEUBiMiAAM3EhYzMjU0JicGByc2NjU0JyE1IRUhFhUUBgcWFzYzMhYVFAcnNjY1NCYjIgcWAQRTyb7p/suG7GrEdYw+TD9IDId2Pv0bBr39VyJKShcWlJ2is3nvJy81OEZMD/6yAT6aowE+AT9a/vzsfT9oNgwD2QRHTD824uI9SU9+MBUXXqmayruEM303PzxCMAOHAAAAAAMAAP+RB3UE+gA9AD4APwAAAQEnASYmIyIGByc2NjMyFhcRITUhFSERNjcmNTQ2MzIWFRQGBxYXBwYVFDMyNjcXBiMiJjU0NjcmJwYHESERAQMY/fOMAe4+Xz8+d05YZKNQe6dp/OgHdfy2c2MFY05aZiwkGXExtn00ZT9PjKebx1RXDxGDrP7tA14Bk/7EvwEWQTUlJ9swK2d9AXDi4v6JBhcVGUdUZE8vTRUrmUsJbV8gIMdHpoNbgSwaJSoK/jME+v0MAAAAAwAA/xQGSQT6ADkAOgA7AAABIgYHJzY3JiMiBhUUFhcHJgI1NDYzMhYXNjc1ITUhFSEVFhYVFAcGFRQzMjY3FwYjIiY1NDc2NTQmAxMELkl+Iv8SGkhHPEmKs7HfwMeZZJxOWH38MgYs/rRrb1+2fTRlP0+Mp5vHyCw/m2EChIWATkQ5OEI2UZyJtrEBA4ydrTA3SBW64uLLKa51paUMal8gIMdHpoPFT1VJSksCdvyPAAD//wAA/msE+gcsAiYJfAAAAQcJsARmAAAAAAAA//8AAP5rBPoHLAImCXwAAAEHCbEE9gAAAAAAAAACAAD+awT6BPoAKwAsAAABFSMRFAYGByc+AjU1IREUFhYXBRYWFRQGByc2NjU0JiYnJy4DNREjNSEE+qhCnpQtRTcU/lwdTWQBH3RePTLgFyEiTVb3S1s9G4sDQQT64v76gpNZDuEJKEFN4v6zc2hMOqVDiG5FnT+BGUwgMDk5Mo8rTmB5bgGE4gAA//8AAP5rBPoHLAImCXwAAAEHCbIE9gAAAAAAAP//ACgAAAmUBywCJglzAAABBwmwCQkAAAAAAAD//wAoAAAJOwcsAiYJcwAAAQcJsQkeAAAAAAAA//8AKAAACTsHLAImCXMAAAEHCbIJHgAAAAAAAP//ACgAAAk7BywCJglzAAABBwmzCR4AAAAAAAAAAgAAAAAGswT6AC4ALwAAAQYGIyImNTQ2MzIXByYjIgYVFBYzMjcRITUhFSERNjMyFhUUBgcnNjU0IyIHESERAw1OilC25PDEh0kVOFpgaVhKkHz88waz/W1hfpW3S0TvbHJQV/7tAQUtI8asp80R5BBMREZJgQIA4uL+4zq3nGXeZ4SVeohY/gUE+gACAAAAAAdIBPoAGAA6AAAlBiMiJC4CNTQ2MzIXNjU0JyE1IRUjESEBFhUUBgcWBDMyNzUGIyImNTQ2MzIXByYjIgYVFBYzMjcRBXSp25r+5+avaVVOXEUpD/5jB0jB/u39NxSBiH4BF6TdqG6fpL3IrnNGFDZKTEtJP3xmU05dnbWgSUNXP0tnNzfi4vvoBBhFVYPBRIuEWFs9o5CRrBHHDzk1NzlWAZwAAAAAAQAAAAAE/gT6ABUAAAERFAYjIiYmNTQ2MzMRIzUhFSMRIREB90JHPJJgSEgV5QT+wf7uBBj9tk1NYY47R0ABM+Li++gEGAAAAgAAAAAFRgT6ABMAKAAAAQYGIyImNTQ3JjU0NyM1IRUjESEBBhUUFzYzMhcHJiMiBhUUFjMyNxEDck+UWr7bM40RUwVGwf7t/fsYVkFHMjQNFSFTVFNHp4IBIi4nspRhSleSRC3i4vvoBBghKVoeDgncAzczMjeTAdYAAAQAAAAABj0E+gArADcAOAA5AAABJiY1ND4CMzM1ITUhFSERISIGBhUUFzYzMhYVFAYjIiQnNxYEMzI1NCMiATIWFRQGIyImNTQ2AQMC2IKsNV1/amz8bwY9/mf+hzQoGT1Wd6rN8Nz2/mux1JEBG57WkVMCMjxPTzw8UFD+XKYBfiatYUBePBlz4uL+wwkYFC8iG6mNj6vU05q1pmhWAaJRPT1RUT09UQG0+wYAAAAAAQAAAAAFowT6ACAAAAEGBiMiJjU0NwU1ITIXByMiBhUUFjMyNjcRITUhFSMRIQPQSIlWt91K/uwCekYxFixpaU9EV4E9/DAFo8H+7gEFKCOqinZHB+QF3UVIOUQ/RwH24uL76AAAAAAEAAAAAAYxBPoAQABBAEIAQwAAARYWFRQCBCMiJjU0NyY1NDYzMhcHJiMiFRQXNjMyFwcmIyIGFRQzMjY2NTQmIyIGFRQWFwcmJjU0Njc1ITUhFSElAxME1Xd9xv6U8u77LaTCsHhKFD9Te2E5SDcqFRcoRT3pku+IQz8lL09RZ5ubcW38PgYx/qT+7X3mA6kmtoic/u+YpZNQQlude40R1RNIORoOCM4DKChlX6tqUVooHjFAFrgynmxjhRdh4uLi+wYDwQAAAAEAAAAABqUE+gAeAAABIyInFhUUBiMiAAM3EhYzMjY1NCc3ITUhNSEVIxEhBNGwaRNwwpzo/sZt61rAcDA4xVcCOvsvBqXB/u0CpgN4h4ukAVEBRlf+9/4zMox1wJDi4vvoAAACAAD++waSBPoAKgA9AAABBgcGBgcWFwcmJyYmNTQ2MzIXMzI2NTQjIgcmJjU0NjYzMzUhNSEVIxEhAREhIgYGFRQXNjMyFxYzMjY3EQS+aYIGl393jVz86It4SzRnZg12eYxVYIGtT5Gbiv2qBpLB/u3+q/5pOCcWQl1nsmgXGkqEQgFtOAhpkBsyKcNmoQhRUTxJXkI5ViYlrmFNbTlz4uL76AQY/sMLFhA0JB5hAzI9AZ0AAAAAAQAAAAAGvwT6ACgAAAEGIyMGBiMiJCc3FhYzMjY1NCYjIgYHJzYzMhYXFjMyNxEhNSEVIxEhBOs/TBUf5aTO/sJv6U7AclxeUUkvSzRTfpGYziIUFkE5+xUGv8H+7QHbDIyN7OxhqalJRUBGEhfVO4Z5AhIBb+Li++gAAAMAAAAABHAE+gAbABwAHQAAASMiBgYVFBYzMjY3FwYGIyIkNTQkNzUhNSEVISURA1B5nZJMnZNTmmBWc+F+7P7ZAQ/k/cMEcP7g/u0CnjFkP21wLDXmNTP+07bmDJ/i4uL7BgAEAAAAAAUPBPoAEwAhACIAIwAAASEVFhYVFAQjIiQmNTQkNzUhNSEBIyIGBhUUFjMyNjU0JgMRBQ/+Qaqu/tr/of75kQEL6f3CBQ/9wAqfiz+YhoiUY/UEGK5I8ZvC1HTUirbkDZ/i/aQ9Xj1seGpjTXsCg/sGAAADAAAAAAWfBPoAKwAsAC0AAAEmJjU0PgIzMzUhNSEVIxEhIgYGFRQXNjMyFhUUBiMiJCc3FgQzMjU0IyITAwLYgqw1XX9qlPxHBZ/T/l80KBk9VneqzfDc9v5rsdSRARu
e1pFTes4BfiatYUBePBlz4uL+wwkYFC8iG6mNj6vU05q1pmhWA1b7BgAAAAQAAAAABO4E+gAgACoAKwAsAAABIyIGBhUUFhcmNTQ2MzIWFRQEIyIkJjU0JCU1ITUhFSEDNjU0JiMiBhUUAxEDg5Kdl2BRUAy3qZyw/u7qsv7tkgEjAQr9iATu/pWYuTApNDxjArssbVFVeh0wN4CWm3uXtXbajsXtCn7i4vy4DG4nL0EyLQP6+wYAAgAAAAAGfwT6ABIAHwAAAREUBgYjIiYmNREjNSEVIxEhESEhERQeAjMyPgI1A8hXto+TtVmLBn/B/u3+C/7nCh82LCc2JQwEGP5tl6JcZLq2AVTi4vvoBBj+kT1DOR8VNkFOAAEAAP/eBRsE+gAZAAABIyIGBhUUFhcHJgI1NDY2MyE1ITUhFSMRIQNHuXtyQXqAsralfcOwAQr8uQUbwf7tAmQaRTtClV+2lgEDgXCcQtLi4vvoAAEAVQAABdsFDwAtAAABBgYjIiYnNjY1NCYjIgYVFBYXByYmNTQ2MzIWFRQGBxYWMzI2NxEjNSEVIxEhBAg+qmzB+zmpqklDKi1JOzuJmq6Rt9qTjiNiP1+aR4ACU8H+7gEsKind3iOIdEtVJx8uJwizE5R0dYLCqpDCOS8uRE0BzOLi++gAAAADAAD/dASUBPoAKQAqACsAABM0Njc1ITUhFSERIyIOAhUUFjMyNyY1NDYzMhYVFAYHFhcHJicGIyIkAQFL/ej90ASU/q5pfoBXJ4yNFw4DYUxmeTE8L2TfR0sbXvL+3QHlAQsCJ6vFDHXi4v6rFS9AK1lZAhgTQkttVTxQHFaEZHKiBtoDnvsGAAAAAAEAVwAABZsFDwA3AAABBgYjIiY1NDcmJjU0NjMyFhUUByc2NTQmIyIGFRQWFzYzMhcHJiMiBhUUFjMyNjcRIzUhFSMRIQPHU45cv9o3ZWzAkomoWZwdKiQ0PWlVP0gyNA0VIVNUU0dUlUByAkbB/u0BAS4kr5JkSjWoaIKqgGV4TmIcIiElOzJEXg0OCdIDNzMyN0pLAfLi4vvoAAEAAAAABQ8E+gAVAAABIRUUBiMiJiY1NDYzIREhNSEVIxEhAzz+u0NEPZRfSEgCbPzEBQ/B/u4CHjVPS2KNO0dAARji4vvo//8AAP9/BQ8E+gImCZUAAAEHCacDtgErAAAAAAACAAAAAAUFBPoAEAAaAAABBgYjIi4CNREjNSEVIxEhAREUFhYzMjY3EQMyRoRjWpFmKYsFBcH+7v5rGUNEPYQ0AUckHzd0ln0BVuLi++gEGP67ZFU1PTUBwQAAAAMAAAAABq8E+gAgACoAKwAAAREhEQYGIyIuAjURIzUhFSERNjMyFhUUBgcnNjU0IyIBERQWFjMyNjcRNQQc/u4zglBakWYpiwav/W1hfpW3S0TvbHJQ/SoZQ0Q1ZzEB+/4FAUAeHjd0ln0BVuLi/uM6t5xl3meElXqIAcX+u2RVNSwsAdviAAACAAAAAAUTBPoAHAAlAAABBgYjIiY1NDYzMhcHJiMjIgcXNjcRITUhFSMRIQMDBgYVFBYzMgM/SZltu+r0xalUFUNlDQcHvycw/MEFE8H+7eDQHBxeXi0BASgkx6unzRDbEAHuGi8B/uLi++gBlAEEEjknTUwAAAABAFUAAAZNBQ4ALAAAASEVFAYjIiYmNTQ2MzM1NCYjIgYVFBYXByYmNTQ2MzIWFhUVIREjNSEVIxEhBHn+pkZKOI5hSEgVQkUsK1FNEbC3q46YskcBWs0CocH+7QHEIFVQYow7QjzPeWQmHy4rA70OlIB1g2imiNIBcuLi++gAAAIAAAAABUkE+gAVABkAAAEhFRQGIyImJjU0NjMzESM1IRUjESERESERA3X+gkRLOY9gSEkU5QVJwf7t/oIBxCJSUWOMOkE9AXLi4vvoAqYBcv6OAAIAAAAABTIE+gASAB8AAAEGBiMiJic2NjU0JyM1IRUjESEBFhYzMjY3ESEWFRQGA19Bnl2+/yp0cyz3BTLB/u7+AyFoUFeSO/6iNWoBLSoq/+0XX1RFROLi++gCTkZNSEkBzFtjXoYAAgAA/94DnwT6ABoAGwAAARYXByYAJiY1NDYzMhc2NTQnITUhFSMWFRQGAwHDxsyyt/7mdB9VTmNKbQ3+EQOfpBSmjAH0xpq2nQEOl1UoQ1dLR3k5M+LiRVWHxf3OAAD//wAA/6oDnwT6AiYJnQAAAQcL8AMgAT8AAAAAAAEAAP/eBgUE+gAkAAABBgYHJzY3JiMiBhUUFhcHJgI1NDYzMhYXNjYzMzUhNSEVIxEhBDJTdyP/ExlGSTxJirOx38DHmWOdTz2aXAX7zgYFwf7uAogFhX9ORzU5QjZRnIm2sQEDjJ2tMTczNLHi4vvoAAAABQAAAAAGjQT6ABwAKQA1ADYANwAAARYWFRQGIyInBgYjIiY1NDYzMhc2Njc1ITUhFSEBJiMiBhUUFjMyNjY3FxYzMjY1NCYjIgYHExEFG4CK0LfFoTKbi7bf0LfDoidpQvv3Bo3+jv27ZGdLYks6N0I6KdpmZkpjTD1LWzFXA4Ip1ZvF3oY/R+bIxd6HMj4MjOLi/kNab2NZYildbZlab2NaYWqFAzT7BgAA//8AAP6hBo0E+gImCaAAAAEHCacEFABNAAAAAAABAAAAAAT1BPoAHgAAAQYGIyImNTQ2MzIXByYjIgYVFBYzMjcRITUhFSMRIQMhRY5ev+bwyZNMFT1ebGhXUqF4/N8E9cH+7QEBJyXErqrKEeQQTERHSH8CAuLi++gAAAADAEn/7wYdBQ8AKAAwADEAAAE2NjU0JiMiBhUUFhcHJiY1NDYzMhYWFRQGBxYXByYnBiMiJjU0NjMyASMRIREjNSEBAbdcYFxKLDRwXzrB0auehNNzh3txksySizlMW3JdT1wEzMH+7ZgCbPr5AjoromhofC8hQTsKsxKkfoGQZsGBkPdNZLSMvZALXlBKVQGa++gEGOL9BQAAAAADAAAAAAUeBPoAEAATABsAAAEGBiMiLgI1ESM1IRUjESEBAREDARUUFhYzMgNLT41cXZVrK4sFHsH+7v6zAU1w/roaSEtKAU0oITZzmH0BVuLi++gEGP5PAbH98gGp4GFWNwAAAgAA/94GGAT6AB0AJgAAASMiJwYHFhcHJgImJjU0NjMyFzY1NCchNSEVIxEhARYVFAcWMyERBETx+nIaG6Dir7//ax1VTlxFKQ/+YwYYwf7t/mcUKhtfATUB3TkPDLa1srABBo9TJ0NXP0tnNzfi4vvoBBhFVWhUAwFZAAEAAP6mBKwE+gAxAAABIgYVFBYWFwcmJjU0NjcmNTQ+AjMzNSE1IRUjESEiBgYVFBc2MzIWFRQGByc2NTQmAl9xfyd5f7LbpDk6WzVjhnbR/SYErMH+ITo0HSBlctbwoaNKgGkBYVhQMFV5X7a6+X5HeDFcfURiQxxz4uL+qxEoGyIjGrKPgqEm2h5IMzQAAAH9J/5U/lP/hgALAAAFMhYVFAYjIiY1NDb9vT9XVz8/V1d6V0JCV1dCQl
cAAQArAEED9gT6ACUAAAEjIg4CFRQWFxYWFRQGIyIkJzcWFjMyNjU0JiYnJiY1NDY2MzMDyeozMigSSXSPatK5vP7ucuFkm2E3PhhJUJJxXKu53AQYBBEaEyNMW3GzcY6o2NRyr4gzLB81TEF3oFxggD8AAAABAAAAAAJ4BPoABwAAASMRIREjNSECeMH+7aQCeAQY++gEGOIAAAD//wAAAAAGZQcsAwYLcAAAAAAAAAAB/eoAAAJ4BywAGQAAARUjESERIzUzJiYjIgYVFBclJjU0NjMyABMCeMH+7aSvN4xfRks4/vZA0rTRARNOBPri++gEGOKrpU5EZ2cBe3qXtf7y/twAAAAAAvwi/dwALwA9ABcAGAAAJTYzMhYVFAYjIiQnNxYWMzI2NTQmIyIHN/3kenKivcesvv7Ho7t81nxISz86SVIfDy6rioamm7mGjIA1MS4xIrMAAAAAAv0E/dwBEQA9ABcAGAAAAwYjIiY1NDYzMgQXByYmIyIGFRQWMzI3A7F6cp3Cyaq7ATinx3DaeEZNPzpJUsD+Ci6qf42rwul4tKE1MS8wIgE0AAAAAv3H/dwAXABRABAAEQAAEwYjIiY1NDY3FwYVFDMyNjcBXIynm8fOwRi2fTRlP/48/iNHpoORrg3SDGpfICABFgAAAAAC/cf82gC0AFEAIAAhAAATBiMiJjU0NyY1NDY3FwYVFDMyNjcXBiMjBhUUFjMyNjcBtIynm8cYcM3CFrR9OGVBSYynCRA5RDdkQ/3e/SFHl3ZAM01+g50MwwldUR8huEcUGicqHiICJwAAAAAC/KgE+gCLBywADQAOAAATBgYjIiYnNxYWMzI2NwGLMvHIsP5K0jaNaGZ7JP6fBuHS0NLMT5WJhJr9zgAC+/ME6/9MBywAFwAYAAABLgIjIgcGIyImJzcWFjMyNzYzMhYWFyX+WBg3QjIbGxoYcpM11iVFPSMkJRxQcGMx/v0E61RdJgIDko9OWzwEA0C4uQ8AAAAAAvx/BOv/TAcsAA0ADgAAASYmIyIHJzYzMh4CFyX+WDtrXkNJSWtsY4J2bC/+/QTryZEb2Skxd+2sDwAAAvxbBOv/SgcsABoAGwAAAS4CIyIHJzYzMhYXNyYmIyIHJzYzMh4CFyX+SjZNRThMXkVyYFN9PgsybmBGQT5pWmp+dnI2/v8E6zo4FjCyMj5FAmxmG68gKG3uvg8AAAD///7uAAAC0QcsAiYJqQAAAQcJsAJGAAAAAAAA///+TgAAAngHLAImCakAAAEHCbECWwAAAAAAAP///toAAAJ4BywCJgmpAAABBwmyAlsAAAAAAAD///62AAACeAcsAiYJqQAAAQcJswJbAAAAAAAAAAL+NP3cASAAAAALAAwAAAU2MzIEFwcmJiMiBwP+NEBQqAETobl4rnYuPxVJEp3Sfp52DAEcAAAABAAoAHsHzQcsAA0AGQBfAGAAAAEGBiMiJic3FhYzMjY3ITIWFRQGIyImNTQ2ATYzMhYVFAcWMzI2Nzc+AjMyFhUUBiMiJzcWFjMyNjU0JiMiBgcHBgYjFhUUBiMiJiYnNxYWMzI1NCcGByc2NjU0IyIHEwa7MvHIsP5K0jCQa2t7H/70MUdIMDBIR/yoqrunwXYjKShFKzdFcIZarNXWybiImClHKUhTQTYnRzgqYZZUA8GyhdfQZOxXw3V4QkVVC2tkhmiE9Qa00tDSzE+ajoqeRjIyRkYyMkb9uVewj5teJjc/T2NcK86yxdNvrR8bWlVMWS9RP5JSFRWVoWL/7Vri33ZhRw8C2AY6Q2RDA0EAAAAAAv5JBPr/XAb5AAMABAAAAxEhERGk/u0G+f55AYf+AQAB/G3+vf9m/3cAAwAAATUhFfxtAvn+vbq6AAAAA/2KBPr/WAcsAAMABAAFAAADIwMhAwOo4O4BOHk9BXYBmv3qAjIAAAP+SQT6ABsHLAADAAQABQAAASEDIwcT/uMBOO7gBEUHEP5mfAIyAP//AAD/KAazBPoCJgmCAAABBwmnA5kA1AAAAAD//wAA/wgHSAT6AiYJgwAAAQcJpwOyALQAAAAA//8AAP9hBP4E+gImCYQAAAEHCacDhAENAAAAAP//AAD+yAalBPoCJgmJAAABBwmnBK8AdAAAAAD//wAA/lQFnwT6AiYJjgAAAQcJpwVUAAAAAAAA//8AAP5UBO4E+gImCY8AAAEHCacEowAAAAAAAP//AAD/UAavBPoCJgmYAAABBwmnA5kA/AAAAAD//wAA/00FMgT6AiYJnAAAAQcJpwOiAPkAAAAAAAMAAP6AB74E+gBOAE8AUAAAAQEnASYmIyIGByc2NjMyFhcRITUhFSERNjcmNTQ2MzIWFRQGBxYXBwYVFDMyNjcXBiMjBhUUFjMyNjcXBiMiJjU0NyYmNTQ2NyYnBgcRIREBAxj984wB7j5fPz53Tlhko1B7p2n86Ad1/LZzYwVjTlpmLCQZcTG0fThlQUmMpwoPOUQ3ZENJjKebxxg4OFdTEw6DrP7tA14Bk/7EvwEWQTUlJ9swK2d9AXDi4v6JBhcVGUdUZE8vTRUrmUsJXVEfIbhHFRknKh4iuEeXdkAzJmVATYAqIB8qCv4zBPr9DAAAAAADAAD+MAaeBPoATABNAE4AAAEiBgcnNjcmIyIGFRQWFwcmAjU0NjMyFhc2NzUhNSEVIRUWFhUUBgcGBhUUMzI2NxcGIyMGFRQWMzI2NxcGIyImNTQ3JjU0Njc2NTQmAxMELkl+Iv8SGkhHPEmKs7HfwMeZZJxOWH38MgYs/rRrbx8oZG19OGVBSYynCg85RDdkQ0mMp5vHGHB0cBM/m3kChIWATkQ5OEI2UZyJtrEBA4ydrTA3SBW64uLLKa51VHdQAzE0UR8huEcVGScqHiK4R5d2QDNNfmOLIjYuSksCdvy+AAAC+/H82gDQABMAMwA0AAATBiMiJjU0Njc2NTQmIyIGByc2NyYjIgYVFBYXByYmNTQ2MzIWFzYzMhYVFAcGBhUUMzI3AdB2hX6iZmEJMy47YxvZEBM3Oi86YZmTvJWeeVWBR2mmjqswXF9jS2H9uP0RN4BlU3cdIBw4OmhhPDooKjIqO3RynJbSbnyHJi5Um4NjZwc0JUoxAk4AAAAC+/H82gF7ABMAPAA9AAABBiMiJjU0NyY1NDcmIyIGByc2NyYjIgYVFBYXByYmNTQ2MzIWFzYzMhYVFRQHBhUUMzI3FwYHBhUUMzI3AQF7c4h/oQxzehk2O2Mb2RATNzovOmGZk7yVnnlVgUdppo6rAaZjS2E/cHUCY0th/Q39DDJvViIeOGlwPiZoYTw6KCoyKjt0cpyW0m58hyYuVJuDCwYGDEVAMZEvAwUKQDECYwAAAAABAWwAAAJ+BPoAAwAAIREhEQFsARIE+vsGAAAAAgFsAAAEaQT6AAMABwAAIREhESERIREDVgET/QMBEwT6+wYE+vsGAAMAbADFBCME+gAMABgAGQAAATIEFRQGBiMiJDU0JBMyNjU0JiMiBhUUFhMCRtYBB3jaidL+9gEG0llwaVpabmhcBFr70
YfScP7Ozfz9TYBsanuAbGx5A1MAAAAAAQDa/zMDxwUPACgAABM2NjU0JiMiBhUUFhcHJiY1NDYzMhYVFAYGBxcWFhUUByc2NjU0JicB7uzNNzUuNjMpm15WuaWyxjWcnt1XSHbRIBAmOf6JAkeOvksvMy0jJzEVhDmDVW+QoZFViqFvqEJ1TJN9kyYoFh88LgErAAIAoP/oBA4FDwAhACIAAAUmJwYjIiY1NDYzMhYXNjY1NCYjIgYHJzYzMhYVFAYHFhcBA0KzkDAzXXBdTzVTLEZOb2REc0dSt8rJ8HtyfKX9jhj5lQZfT0pVJiEjfE1jZyAj1VHiyYHWRnjbAcoAAAACALj/ZgPkBQ8ALAAtAAATNjMyFhUUBxYWFRQGBxYXByYnBiMiJjU0NjMyFhc2NTQnBgcnNjY1NCMiBgcTuK3Ir8ZXT0pyb31Exm2KKC5jeV1POV4xhHFSaAt7gY1Ffj2rBLpVrpFzWyh9S2qqNXddj5eOA19PSlUvJyNkZRkTAtgFPEJkJxz9awAAAAMAeP/8BCAFDwAmADIAMwAAAR4CFRQGIyImNTQ2NjcuAjU0NwUGFRQWFhc2NjU0JyUWFRQGBgEGBhUUFjMyNjU0JgMC7GBaHMWwr9YZX2V+hCsXAQoOHlJLXGsNAQgYMYz+5DE/OzQwPTwxAr9iimk+jaOkhztejWhyrIBHPDkvKR8uUnFLXZpBIykvOUU+f63+8TNhLTdCQTYxYAMUAAACAIf/dwSNBQ8AGwAcAAABJjU0NjMyFhUUBxYXBwInIyIkNTQ2NwUGBhUUEwJyAl9PZHtwcY/inGIh+P7zPjoBG0Y7rQJXERpITGpXfTXy1GYBDvLz623pZFFtqFLzApYAAAAAAQCn/zIEewUPADIAAAEmNTQ2MzIXByYjIgYVFBYXNjMyFwcmIyIGFRQXJjU0NjMyFhUUBgcWFwcmJwYjIiY1NAEthtq4qFwUXm9UTikkSFxCPRYiPVtlvgJeUGF6OD1RZddySBYk1fkC7WWkgZgU3hUsMh03EBEM3AZIQoQPDBZITmpVOlIenpdqxq8E2apyAAAAAAIAMwBGBF8E+gAhACIAABMlEhIzMjY1NCYjIgYVFBYXByQRNDYzMhYSFRQGIyImAgIBMwEAHbSfWlRQRyAoWFYk/pyckHa7atHCnt6fZwJdBLEh/if+NHmTnsMrIDM/Br42AQ17jY/+/KXu+HcBAgG0AYcAAgCKAAAERwUPABYAFwAAARcBBgYVFBYzMjY3FwYGIyImNTQ2NjcTAuze/mliMmJZarJkbYztgM72HmFi2AUPtP4XdmM0O0I8QthOQMOyPW+idwHAAAAAAAIAbv8zBFIFDgAbACcAAAEWFwUWFhUUByc2NjU0JwEuAjU0NjYzMhYVECUyNjU0JiMiBhUUFgJcJiwBBVBPdtEXGF7+lGhvK2a4b7Tc/mc8SkU8O0pFAkEYIMM8dFOTfZMaLB4+SwEiU4eIX2WuZc2j/uWSTUBASk8/P0oAAAAAAwCEAh4DIAT6AAsAFwAYAAABIiY1NDYzMhYVFAYDIgYVFBYzMjY1NCYDAdORvruUkL29kT1MTjs8TU07Ah6vjYmxr4uKsgG+Sjg6Sks5OEoBHgAAAwDJBAsCNwcsAAsADAANAAABMhYVFAYjIiY1NDYFAwFfP1dXPz9XVwEXhAU9V0JCV1dCQldDAjIAAAD//wAoAAAHOQcsAiYJcgAAAQcJsAauAAAAAAAAAAEAAP/YBP4E+gAXAAAXNSERIREUBiMiJiY1NDYzMxEjNSEVIxGoAoP+zEJHPJJgSEgV5QT+wSi6A4b94U1NYY47R0ABCOLi+8AAAAABAAD/2AaRBPoAIAAAASMRITUhESMiJxYVFAYjIgADNxYWMzI2NTQnNyE1ITUhBpHB+tgEFZxcGGjCnOb+xm/rWsBwMDjFVwIm+0MGkQQY+8C6AigCbHmCmAE2ATNX8uctK3pmwHziAAEAagAAA6oFDwAUAAAhETY2NTQmIyIGByc2MzIWFRQGBxEBQ6iqbWdEekdSwMLQ7qqqAqQDcGpTWB4i1U7dvp/LK/4hAAAAAAQAAP/YBZ8E+gAsADAAMQAyAAABJiY1NDY2MzM1ITUhFSMRISIHBhUUFzYzMhYVFAYjIiQmJzcWBDMyNTQmIyIBNSEVAQMC2IOrSZGhlPxHBZ/T/l9GFxhCXmqn0Ozfzf7i7mvbcAELxt9FTFb9bARr/qbOAeEinVdDYDpE4uL+/QkMESgfGpV4f5Fat4uAqKpSHCX91rq6BSL7BgAAAAACAAD/2AUTBPoAHgAnAAABFSMRITUhNQYGIyImNTQ2MzIXByYjIyIHFzY3ESE1AQMGBhUUFjMyBRPB/FYCl0mZbbvq88apVBVDZQ0HB78nMPzBAl/QHBxeXi0E+uL7wLqhKCTCpqLIENEQAe4aLwHW4vzCAQQSOSdNTAAAAwC1A+ACQAcsAAYABwAIAAABBgMjEjchAxMCQDJ+20YeARh+hAW/xf7mAS/GAVf9zgAAAAH/yv6tADYGTQADAAADETMRNmz+rQeg+GAAAAAB/tX+rgErBk0ADgAAExcHJxEjEQcnNyc3FzcXSuFLqmyqS+HhS+DgSwUr3Emp+f8GAalJ3NlJ3NxKAAAIAGoA3gOqBB0ACgASABoAIgAqADIAOgBEAAABFAYjIjU0NjMyFgUUIyI1NDMyBRQjIjU0MzITFCMiNTQzMgUUIyI1NDMyExQjIjU0MzIFFCMiNTQzMgUUBiMiNTQzMhYCRR0fOhweHx3+9zs7OzsCEjs7OztcPDs7PP02Ozs7O1w7Ozs7AhI7Ozs7/vcdHzo6Hx0D4x4dOx4cHIQ7Ozs7Ozs7/sY7Ozs7Ozs7/sY7Ozs7Ozs7oR4dOzsd//8AKP7MBuAHLAImCXIAAAAnCbEGwwAAAQcJpwSxAHgAAAAA//8AKP7MBuAFDwImCXIAAAEHCacEsQB4AAAAAP//ACj+zAk7BQ8CJglzAAABBwmnBLEAeAAAAAD//wAA/hkEPAT6AiYJdAAAAQcMJAPz/8UAAAAA//8AAP4ZBDwHLAImCXQAAAAnCg8EDQAAAQcMJAPz/8UAAAAA//8AAP5UBMoE+gImCXYAAAEHCacEqwAAAAAAAP//AAD+VAa9BPoCJgl3AAABBwmnBKsAAAAAAAD//wAA/toHdQT6AiYJeAAAAQcJpwPrAIYAAAAA//8AAP5UBkkE+gImCXkAAAEHCacETQAAAAAAAP//AAD+awT6BywCJgl8AAAAJwmwBGYAAAEHCacD0ABmAAAAAP//AAD+awT6BywCJgl8AAAAJwmxBPYAAAEHCacD0ABmAAAAAP//AAD+awT6BPoCJgl8AAABBwmnA9AAZgAAAAD//wAA/msE+gcsAiYJfAAAACcJsgT2AAABBwmnA9AAZgAAAAD//wAo/swJlAcsAiYJcwAAACcJsAkJAAABBwmnBLEAeAAAAAD//wAo/swJOwcsAiYJcwAAACcJsQkeAAABBwmnBLEAeAAAAAD//wAo/swJOwcsAiYJcwAA
ACcJsgkeAAABBwmnBLEAeAAAAAD//wAo/swJOwcsAiYJcwAAACcJswkeAAABBwmnBLEAeAAAAAD//wAA/oAHvgT6AiYJxgAAAQcJpwPrAIYAAAAA//8AAP4wBp4E+gImCccAAAEHCacETQAAAAAAAP//ACj+zAc5BywCJglyAAAAJwmwBq4AAAEHCacEsQB4AAAAAP//AAD/RgVGBPoCJgmFAAABBwmnA5gA8gAAAAD//wAA/lQGPQT6AiYJhgAAAQcJpwVUAAAAAAAA//8AAP8oBaME+gImCYcAAAEHCacEGwDUAAAAAP//AAD+VAYxBPoCJgmIAAABBwmnBToAAAAAAAD//wAA/hkGkgT6AiYJigAAAQcMJAPz/8UAAAAA//8AAP8NBr8E+gImCYsAAAEHCacEkwC5AAAAAP//AAD+VARwBPoCJgmMAAABBwmnBJwAAAAAAAD//wAA/lQFDwT6AiYJjQAAAQcJpwS+AAAAAAAA//8AAP8hBn8E+gImCZAAAAEHCacEcgDNAAAAAP//AAD+NwUbBPoCJgmRAAABBwmnA/P/4wAAAAD//wBV/00F2wUPAiYJkgAAAQcJpwQHAPkAAAAA//8AAP7cBJQE+gImCZMAAAEHCacD8wCIAAAAAP//AFf+9wWbBQ8CJgmUAAABBwmnBFAAowAAAAD//wAA/1AFBQT6AiYJlwAAAQcJpwOZAPwAAAAA//8AAP8oBRME+gImCZkAAAEHCacDjwDUAAAAAP//AFX/TQZNBQ4CJgmaAAABBwmnBNsA+QAAAAD//wAA/00FSQT6AiYJmwAAAQcJpwPNAPkAAAAA//8AAP5UBgUE+gImCZ8AAAEHCacFNgAAAAAAAP//AAD/KAT1BPoCJgmiAAABBwmnA3sA1AAAAAD//wBJ/uYGHQUPAiYJowAAAQcJpwQFAJIAAAAA//8AAP9QBR4E+gImCaQAAAEHCacDmQD8AAAAAP//AAD/qgYYBPoCJgmlAAABBwvwAyABPwAAAAD//wAA/fYErAT6AiYJpgAAAQcL8AMQ/4sAAAAAAAIAS/9yBnUFDwA9AEkAAAEGIyImJwYGFRQWMzMmNTQ2MzIWFRQGBxYXByYnBiMiJjU0NjcmJjU0NjMyFhUUBgcWMzI2NzUjNSEVIxEhATY2NTQmIyIGFRQWBKGK4E6bTVBQY2EYA1VOYXkwLkxHv2hGNyTh+GJ4TEzKsrHNRUEeHWSrVpgCbMH+7f1jPj89ODdCOAJvSB0eMVkuPkIREz1JaUg+TRhaQXNwbQS6nV+NSzqMUn6gk35JfDcCMTiv4uL76ANGI0spLzY6MShIAAACAAD/lwXQBPoAJwAoAAABIyInFhYVFAYHFhcHJicmJjU0NjMyFhc2NjU0Jic3ITUhNSEVIxEhJQP8vXs4RUViWH51V+HYiXtLNDZqNDw8sLBXAv/8BAXQwf7t/HACpwgwfU1ajCM3I7talgZSUzxJMzIMPDBVfyrAkOLi++iGAAL9yATrACYHLAAQABEAAAEmNTQ2MzIXByYjIgYVFBYXJ/4mXsSrf3AyPEdPSS4m5ATrnn2Lmx/EFU1LMHc0DwAC/Bz93P/X/9sABgAHAAAFBScBMwEHB/5D/mWMAfKTATa6Yfn7vwEQ/rB/MAAAAAABAAAAAAVQBPoAJwAAAQYGIyImNTQ2MzIXByYjIgYVFBYzMjcRITUhFSERNjMzFSMiBgcRIQMNTopQtuTwxIdJFThaYGlYSpB8/PMFP/7hVIlTZUdZK/7tAQUtI8asp80R5BBMREZJgQIA4uL+nibiHB/+QQAAAAACAAAABQXRBPoAIQA4AAAlBiEiJC4CNTQ2MzIXNjU0JyE1IRUhFhUUBgcWBDMyNjc3BgYjIiY1NDYzMhcHJiMiBhUUFjMyNwXR2P73mv7n5q9pVU5cRSkP/mMFkf0aFIGIfgEXpG/EdRFOll2kvciuc0YUNkpMS0k/f2N8d12dtaBJQ1c/S2c3N+LiRVWDwUSLhCw8aDIoo5CRrBHHDzk1NzlXAAEAAAE0AqAE+gARAAABERQGIyImJjU0NjMzESM1IRUB90JHPJJgSEgV5QKgBBj9tk1NYY47R0ABM+LiAAEAAADNA/sE+gAkAAABBgYjIiY1NDcmNTQ3IzUhFSEGFRQXNjMyFwcmIyIGFRQWMzI3A/tl436+2zONEVMDU/4aGFZBRzI0DRUhU1RTR6iBAYNbW7KUYUpXkkQt4uIhKVoeDgncAzczMjeV//8AAP3cBj0E+gImCYYAAAEHCbgESAAAAAAAAAACAAAAugRZBPoAGAAcAAABBgYjIiY1NDcFNSEyFwcjIgYVFBYzMjY3ATUhFQRZZNN5t91K/uwCekYxFixpaU9EV4E9/DADmgFmWlKqinZHB+QF3UVIOUQ/RwH24uIAAAACAAAAAAYxBPoAOQA9AAABMhYVFAIEIyImNTQ3JjU0NjMyFwcmIyIVFBc2MzIXByYjIgYVFDMyNjY1NCYjIgYVFBYXByYmNTQ2JSE1IQQrxNrG/pTy7vstpMKweEoUP1N7YTlINyoVFyhFPemS74hDPyUvT1Fnm5urAqL5zwYxA8HGtpz+75ilk1BCW517jRHVE0g5Gg4IzgMoKGVfq2pRWigeMUAWuDKebHuOV+IAAAIAAAB7BO4E+gAWABoAAAEjIicWFRQGIyIAAzcSFjMyNjU0JzchJTUhFQTRsGkTcMKc6P7GbetawHAwOMVXAjr7LwTuAqYDeIeLpAFRAUZX/vf+MzKMdcCQ4uIAAQAA/vsFFgT6ADkAAAEGBwYGBxYXByYnJiY1NDYzMhczMjY1NCMiByYmNTQ2NjMzNSE1IRUhESEiBgYVFBc2MzIXFjMyNjcFFoq5Bpd/d41c/OiLeEs0Z2YNdnmMVWCBrU+Rm4r9qgTb/o7+aTgnFkJdZ7JoFxpKhEIBn2UNaZAbMinDZqEIUVE8SV5COVYmJa5hTW05c+Li/sMLFhA0JB5hAzI9AAAAAgAAALYE6wT6ACAAJAAAAQYjIwYGIyIkJzcWFjMyNjU0JiMiBgcnNjMyFhcWMzI3ATUhFQTrP0wVH+Wkzv7Cb+lOwHJcXlFJL0s0U36RmM4iFBZBOfsVBOQB2wyMjezsYampSUVARhIX1TuGeQISAW/i4v//AAD93ASeBPoCJgmMAAABBwm4A34AAAAAAAD//wAA/dwFDwT6AiYJjQAAAQcJuAOFAAAAAAAA//8AAP3cBZ8E+gImCY4AAAEHCbgESAAAAAAAAP//AAD93ATuBPoCJgmPAAABBwm4A5cAAAAAAAAAAgAAAPAELwT6AA4AGwAAAREUBgYjIiYmNREjNSEVISERFB4CMzI+AjUDyFe2j5O1WYsEL/6H/ucKHzYsJzYlDAQY/m2XolxkurYBVOLi/pE9QzkfFTZBTgAAAAACAAD/3gNHBPoAEQAVAAABIyIGBhUUFhcHJgI1NDY2MyElNSEVA0e5e3JBeoCytqV9w7ABCvy5A0MCZBpFO0KVX7aWAQOBcJxC0uLiAAAAAAIAVQDZBJUFDwAlACY
AAAEGBiMiJic2NjU0JiMiBhUUFhcHJiY1NDYzMhYVFAYHFhYzMjY3AwSVhtx/wfs5qapJQyotSTs7iZqukbfak44jYj9fmkeAAY9qTN3eI4h0S1UnHy4nCLMTlHR1gsKqkMI5Ly5ETQKu//8AAP3cBU8E+gImCZMAAAEHCbgELwAAAAAAAAACAFcArwRRBQ8ALwAwAAABBgYjIiY1NDcmJjU0NjMyFhUUByc2NTQmIyIGFRQWFzYzMhcHJiMiBhUUFjMyNjcDBFFu2YC/2jdlbMCSiahZnB0qJDQ9aVU/SDI0DRUhU1RTR1SVQHIBZl9Yr5JkSjWoaIKqgGV4TmIcIiElOzJEXg0OCdIDNzMyN0pLAtQAAAACAAABTwM8BPoADQARAAABIRUUBiMiJiY1NDYzIQE1IRUDPP67Q0Q9lF9ISAJs/MQDOAIeNU9LYo07R0ABGOLiAAAAAAEAAAEEA74E+gAWAAABBgYjIi4CNREjNSEVIREUFhYzMjY3A76ExHFakWYpiwLe/r8ZQ0Q9hDQBmlo8N3SWfQFW4uL+u2RVNT01AAACAAAAAAVNBPoAGQAjAAABESERBgYjIi4CNREjNSEVIRE2MzMVIyIGAREUFhYzMjY3EQQc/u42elVakWYpiwU8/uBTi1NlPVz9ThlDRDZhNgHE/jwBPx4dN3SWfQFW4uL+oCTiFQIz/rtkVTUmLQHgAAAAAAMAAAC1A6UE+gAUABgAIQAAAQYGIyImNTQ2MzIXByYjIyIHFzY3ATUhFQMDBgYVFBYzMgOlYc6Gu+r0xalUFUNlDQcHvycw/MEDXP3QHBxeXi0BRU5Cx6unzRDbEAHuGi8B/uLi/XwBBBI5J01MAAACAFUA/wR5BQ4AJAAoAAABIRUUBiMiJiY1NDYzMzU0JiMiBhUUFhcHJiY1NDYzMhYWFRUhAzUzFQR5/qZGSjiOYUhIFUJFLCtRTRGwt6uOmLJHAVrNtwHEIFVQYow7QjzPeWQmHy4rA70OlIB1g2imiNIBcuLiAAABAAAA/wN1BPoAFQAAASEVFAYjIiYmNTQ2MzMRIzUhFSERIQN1/oJESzmPYEhJFOUDX/6YAX4BxCJSUWOMOkE9AXLi4v6OAAABAAAA2QPqBPoAGwAAAQYGIyImJzY2NTQnIzUhFSEWFRQGBxYWMzI2NwPqgst6vv8qdHMs9wMO/vM1amohaFBXkjsBj2tL/+0XX1RFROLiW2NehihGTUhJAAAAAAIAAAG4A3kE+gADABAAAAEhNSETBiMiJCc3FhYzMjY3A238kwNtDHqykP74ilSKvmFomVAEGOL9AUNUT8hLOSouAAAAAgAA/94ErAT6AB8AIwAAASIGByc2NyYjIgYVFBYXByYCNTQ2MzIWFzY2MzIXByYBNSEVBDxUfyT/ExlGSTxJirOx38DHmWOdTz2aXEE+UAn7rQR3AomIgk5HNTlCNlGcibaxAQOMna0xNzM0Ec8CAY/i4gAEAAAARgZWBPoAFQAZACYAMgAAATIWFRQGIyInBgYjIiY1NDYzMhc2NiU1IRUBJiMiBhUUFjMyNjY3FxYzMjY1NCYjIgYHBJC33tC3xaEym4u239C3w6IqlvwJBlb8gGRnS2JLOjdCOinaZmZKY0w9S1sxA5blyMXehj9H5sjF3oc2UILi4v5DWm9jWWIpXW2ZWm9jWmFqhQAAAAACAAAAtQOHBPoAFgAaAAABBgYjIiY1NDYzMhcHJiMiBhUUFjMyNwE1IRUDh1zLcL/m8MmTTBU9XmxoV1KhePzfAz4BRUhIxK6qyhHkEExER0h/AgLi4gAAAgBJ/+8DhAUPACgAKQAAATY2NTQmIyIGFRQWFwcmJjU0NjMyFhYVFAYHFhcHJicGIyImNTQ2MzIHAbdcYFxKLDRwXzrB0auehNNzh3txksySizlMW3JdT1w7AjoromhofC8hQTsKsxKkfoGQZsGBkPdNZLSMvZALXlBKVX8AAAACAAABBAOlBPoADwAXAAABBgYjIi4CNREjNSEVIQEHARUUFhYzMgOlZcJrXZVrK4sDUf6tAU1w/roaSEtKAYBBOzZzmH0BVuLi/k9dAangYVY3AAABAAD/3gQwBPoAIgAAASMiJwYHFhcHJgImJjU0NjMyFzY1NCchNSEVIRYVFAcWMyEEMN36choboOKvv/9rHVVOXEUpD/5jBBr+kRQqG18BIQHdOQ8MtrWysAEGj1MnQ1c/S2c3N+LiRVVoVAMAAAAAAQAA/qYEigT6ACgAAAEVISIGBhUUFhYXByYmNTQ3JjU0PgIzMzUhNSEVIxEhIgYGFRQXNjMEiv4Aam5DKH16suGecFg1Y4Z2dP2DBF3P/n46NB0fYbECP+MeTTgvV3xbtr/1hY1eXHpEYkMcc+Li/qsRKBsiIhQAAgBL/3IEeAUPADUAQQAAAQYjIiYnBgYVFBYzMyY1NDYzMhYVFAYHFhcHJicGIyImNTQ2NyYmNTQ2MzIWFRQGBxYzMjY3JTY2NTQmIyIGFRQWBHiAwU6bTVBQY2EYA1VOYXkwLkxHv2hGNyTh+GJ4TEzKsrHNRUEeHUV9OP3OPj89ODdCOAJcNR0eMVkuPkIREz1JaUg+TRhaQXNwbQS6nV+NSzqMUn6gk35JfDcCFxgXI0spLzY6MShIAAADAAD/lwP8BPoAHwAjACQAAAEjIicWFhUUBgcWFwcmJyYmNTQ2MzIWFzY2NTQmJzchJTUhFQED/L17OEVFYlh+dVfh2Il7SzQ2ajQ8PLCwVwL//AQD+Px0AqcIMH1NWowjNyO7WpYGUlM8STMyDDwwVX8qwJDi4vxuAAD//wAA/ygFUAT6AiYKEQAAAQcJpwOZANQAAAAA//8AAP8IBdEE+gImChIAAAEHCacDsgC0AAAAAP//AAD/YQKgBPoCJgoTAAABBwmnA4QBDQAAAAD//wAA/0YD+wT6AiYKFAAAAQcJpwOYAPIAAAAA//8AAP3cBj0E+gImCYYAAAEHC7kESAAAAAAAAP//AAD/KARZBPoCJgoWAAABBwmnBBsA1AAAAAD//wAA/lQGMQT6AiYKFwAAAQcJpwU6AAAAAAAA//8AAP7IBO4E+gImChgAAAEHCacErwB0AAAAAP//AAD+GQUWBPoCJgoZAAABBwwkA/P/xQAAAAD//wAA/w0E6wT6AiYKGgAAAQcJpwSTALkAAAAA//8AAP3cBNAE+gImCYwAAAEHC7kDfgAAAAAAAP//AAD93AUPBPoCJgmNAAABBwu5A4UAAAAAAAD//wAA/dwFnwT6AiYJjgAAAQcLuQRIAAAAAAAA//8AAP3cBO4E+gImCY8AAAEHC7kDlwAAAAAAAP//AAD/IQQvBPoCJgofAAABBwmnBHIAzQAAAAD//wAA/jcDRwT6AiYKIAAAAQcJpwPz/+MAAAAA//8AVf9NBJUFDwImCiEAAAEHCacEBwD5AAAAAP//AAD93AVPBPoCJgmTAAAAJwm4BC8AAAEHCacDewCcAAAAAP//AFf+9wRRBQ8CJgojAAABBwmnBFAAowAAAAD//wAA/38DPA
T6AiYKJAAAAQcJpwO2ASsAAAAA//8AAP9QA74E+gImCiUAAAEHCacDmQD8AAAAAP//AAD/UAVNBPoCJgomAAABBwmnA5kA/AAAAAD//wAA/ygDpQT6AiYKJwAAAQcJpwOPANQAAAAA//8AVf9NBHkFDgImCigAAAEHCacE2wD5AAAAAP//AAD/TQN1BPoCJgopAAABBwmnA80A+QAAAAD//wAA/00D6gT6AiYKKgAAAQcJpwOiAPkAAAAA//8AAP/pA3kE+gImCisAAAEHCacEMwGVAAAAAP//AAD+VASsBPoCJgosAAABBwmnBTYAAAAAAAD//wAA/qEGVgT6AiYKLQAAAQcJpwQUAE0AAAAA//8AAP8oA4cE+gImCi4AAAEHCacDewDUAAAAAP//AEn+5gOEBQ8CJgovAAABBwmnBAUAkgAAAAD//wAA/1ADpQT6AiYKMAAAAQcJpwOZAPwAAAAA//8AAP+qBDAE+gImCjEAAAEHC/ADIAE/AAAAAP//AAD99gSKBPoCJgoyAAABBwvwAxD/iwAAAAAAAwAA/6sG0wT6AC4ALwAwAAABESERASc3JiY1NDYzMhcHJiMiBhUUFjMyNxEhNSEVIRE2MzIWFRQGByc2NTQjIgEBBED+7f2fjPRhaPHDh0kVOFpgaVhKkHz80wbT/W1hfpW3S0TvbHJQ/pb+uAH7/gUBJf6Gv4MspXGkyBHkEExERkmBAgDi4v7jOrecZd5nhJV6iAKn+7sAAAAAAwAA/twHSAT6ABgAOwA8AAAlASclLgM1NDYzMhc2NTQnITUhFSMRIQEWFRQGBxYEMzI3NzUGIyImNTQ2MzIXByYjIgYVFBYzMjcRAQV0/LWMAVGM+bdzVU5cRSkP/mMHSMH+7f03FIGIfgEXpEo8/26fpL3IrnNGFDZKTEtJP3xm/nyT/km/lyOYu6lMQ1c/S2c3N+Li++gEGEVVg8FEi4QIeTI9o5CRrBHHDzk1NzlWAZz77QAAAAEAAP/JBP4E+gAZAAABERQGIyImJjU0NjMzESM1IRUjESE1BScBEQH3Qkc8kmBISBXlBP7B/u7+bYwCHwQY/dRNTWGOO0dAARXi4vvow/q/ASQCbAAAAAADAAD/rQVGBPoAFAAqACsAAAEBJyUmJjU0NyYmNTQ3IzUhFSMRIQEGFRQXNjMyFwcmIyIGFRQWMzI2NxEBA3L9howBDGpyMkNJEVMFRsH+7f37GFZBRzI0DRUhU1RTR1OVQf7DATb+d7+QIpNpXkYpeEhELeLi++gEGCEpWh4OCdwDNzMyN0lKAdb8tQAAAP//AAD93AY9BPoCJgmGAAABBwoQBQkAAAAAAAAAAgAA/6cFowT6ACAAIQAAAQEnNyYmNTQ3BTUhMhcHIyIGFRQWMzI2NxEhNSEVIxEhJQPQ/Z+M+WBnSv7sAnpGMRYsaWlPRFeBPfwwBaPB/u7+2QEh/oa/hiWKWXNEB+QF3UVIOUQ/RwH24uL76LoA//8AAP3cBjEE+gImCYgAAAEHChAFBgAAAAAAAAACAAD/yQcJBPoAJAAlAAABISInFhYVFAYjIgADNxYWMzI2NTQmJzchNSE1IRUjESE1BScBAQU1/uxaGCssuZTX/rVv61e8azYvW1tXAp76ywcJwf7t/m2MAh/9HgKmAi9gMnmRAR4BF1fU0SkoMlonwJDi4vvow/q/AST+zwAAAAIAAP77BxME+gAvAEIAAAEGBgcGBgcWFwcmJyYmNTQ2MzIXMzI2NTQjIgcmJjU0NjYzMzUhNSEVIxEhNQcnAQERISIGBhUUFzYzMhcWMzI2NxEFP0nGXAKYg3eNXPzoi3hLNGdmDXZ5jFVgga1PkZuK/aoHE8H+7dumAYH+Kv5pOCcWQl1nq2UzQVS6SgGsNkEBaZYcMinDZqEIUVE8SV5COVYmJa5hTW05c+Li++ia0aoBEgKT/sMLFhA0JB5YDEg8AYgAAAAAAQAA/8kHDwT6ACwAAAEGIyMGBiMiJCc3FhYzMjY1NCYjIgYHJzYzMhYXFjMyNxEhNSEVIxEhNQUnAQU7YXoVH+Wkzv7Cb+lOwHJcXlFJL0s0U36RmM4iFBZwWvrFBw/B/u3+n4wB7QIPIoyN7OxhqalJRUBGEhfVO4Z5AjgBK+Li++io378BCQAAAP//AAD93ARwBPoCJgmMAAABBwoQBCAAAAAAAAD//wAA/dwFDwT6AiYJjQAAAQcKEARzAAAAAAAA//8AAP3cBZ8E+gImCY4AAAEHChAFCQAAAAAAAP//AAD93ATuBPoCJgmPAAABBwoQBHoAAAAAAAAAAgAA/8kGfwT6ABYAIwAAAREUBgYjIiYmNREjNSEVIxEhNQUnAREhIREUHgIzMj4CNQPIV7aPk7VZiwZ/wf7t/m2MAh/+C/7nCh82LCc2JQwEGP6Vl6JcZLq2ASzi4vvow/q/ASQCbP65PUM5HxU2QU4AAAABAAAAAATsBPoAGAAAAQEnASYmIyIGByc2NjMyFhcRITUhFSMRIQMY/fOMAe4+Xz8+d05YZKNQe6dp/OgE7MH+7QGT/sS/ARZBNSUn2zArZ30BcOLi++gAAgBV/68F2wUPACwALQAAAQEnJSYmJzY2NTQmIyIGFRQWFwcmJjU0NjMyFhUUBgcWMzI2NxEjNSEVIxEhJQQI/XOMARh7pCmpqklDKi1JOzuJmq6Rt9qTjkd9X5pHgAJTwf7u/qwBQ/5sv5YpyJ8jiHRLVScfLicIsxOUdHWCwqqQwjldRE0BzOLi++jZAAAABAAA/0sElAT6ACoAKwAsAC0AABM0Njc1ITUhFSERIyIOAhUUFjMyNyY1NDYzMhYVFAYHFhcHJicBJyUmJgEBJ0v96P3QBJT+rml+gFcnjI0XDgNhTGZ5MTwvZN9VQ/3ujAE6kpIB5QEL2wInq8UMdeLi/qsVL0ArWVkCGBNCS21VPFAcVoRkipP+ur+iLb0DZPsGggAAAAACAFf/hAWbBQ8ANwA4AAABASclJiY1NDcmJjU0NjMyFhUUByc2NTQmIyIGFRQWFzYzMhcHJiMiBhUUFjMyNjcRIzUhFSMRISUDx/1wjAEfaHE2ZGzAkomoWZwdKiQ0PWlVP0gyNA0VIVNUU0dUlUByAkbB/u3+wwEY/my/miKSZmBHNadpgqqAZXhOYhwiISU7MkReDQ4J0gM3MzI3SksB8uLi++ivAAABAAD/yQUPBPoAGQAAASEVFAYjIiYmNTQ2MyERITUhFSMRITUFJwEDPP67Q0Q9lF9ISAJs/MQFD8H+7v5tjAIfAh41T0tijTtHQAEY4uL76MP6vwEkAAAAAwAA/90FBQT6AA4AGAAZAAABASclJiY1ESM1IRUjESEBERQWFjMyNjcRAQMy/aOMAR5he4sFBcH+7v5rGUNEPYQ0/tMBUf6Mv5Mhr8MBVuLi++gEGP67ZFU1PTUBwfzsAAAAAAQAAP/dBq8E+gAfACgAKQAqAAABESERASclLgI1ESM1IRUhETYzMhYVFAYHJzY1NCMiAREUFhYzMjcRNQEEHP7u/cuMARNGZCeLBq/9b
WF+lbdLRO9sclD9KhlDRGti/vsB+/4FAUT+mb+UGnOTcgFW4uL+4zq3nGXeZ4SVeogBxf67ZFU1WAHb4vwKAAAAAAMAAP+gBTIE+gAcACUAJgAAAQEnJSYmNTQ2MzIXByYjIyIHFzY3ESE1IRUjESEDAwYGFRQWMzIHA179bowBCGxy9MWpVBVDZQ0HB78nMPyiBTLB/u3g0BwcXl4tbwEr/nW/iCupdKTIENsQAe4aLwH+4uL76AGUAQQSOSdNTNgAAQBV/8kGTQUOADAAAAEhFRQGIyImJjU0NjMzNTQmIyIGFRQWFwcmJjU0NjMyFhYVFSERIzUhFSMRITUFJwEEef6mRUs5kF5JRxVCRSwrUU0RsLerjpiyRwFazQKhwf7t/m2MAh8B3B5NS2GGN0A6t3lkJh8uKwO9DpSAdYNopoi6AVri4vvow/q/ARoAAAAAAgAA/8kFSQT6ABkAHQAAASEVFAYjIiYmNTQ2MzMRIzUhFSMRITUFJwERESERA3X+gkVKOZJdSUgU5QVJwf7t/m2MAh/+ggHcH0xLYoU3PzsBWuLi++jD+r8BGgEcAVr+pgAAAAMAAP+5BTIE+gASAB8AIAAAAQEnJSYmJzY2NTQnIzUhFSMRIQEWFjMyNjcRIRYVFAYTA1/9h4wBEnWdHnRzLPcFMsH+7v4DIWhQV5I7/qI1alcBQ/52v5Mx46YXX1RFROLi++gCTkZNSEkBzFtjXob+YwD////h/dwDnwT6AiYJnQAAAQcKEAPFAAAAAAAAAAEAAP/eBkEE+gArAAABIgYHJzY3JiMiBhUUFhcHJgI1NDYzMhYXNjYzMhc1ITUhFSMRITUHJwE1JgQ8VH4l/xMZRkk8SYqzsd/Ax5ljnU89mlwhIPuSBkHB/u7uogGQEwKJhoRORzU5QjZRnIm2sQEDjJ2tMTczNAW24uL76N3GrQEbpAYAAP//AAD93AaNBPoCJgmgAAABBwoQBggAAAAAAAAAAgAA/64FEwT6AB8AIAAAAQEnNyYmNTQ2MzIXByYjIgYVFBYzMjY3ESE1IRUjESElA0D9jIz/aWzwyZNMFT1ebGhXUlCNPPzABRPA/u3+sAEn/oe/gymkcqbGEeQQTERHSD9AAgLi4vvotQAAAgAZAAAGRAUPACIALgAAAQEnASYmJwYFJzY3JiY1NDYzMhYVFAYHFhYXESM1IRUjESEBNjY1NCYjIgYVFBYEcP2RjAHWWJhAWv7kjLx0V1fMrbPOU1ddwmZsAkDB/u39pT09PTg4QToBr/5+vwEBGDQcN5q/VDlAkVB+oJN+WIRCHiUHAYLi4vvoAz4oSS4wNToxKEwAAAAEAAD/3QUeBPoADwASABoAGwAAAQEnJS4CNREjNSEVIxEhAQERAwEVFBYWMzIHA0v9iowBHk5mKIsFHsH+7v6zAU1w/roaSEtKeQFh/ny/kx1zknEBVuLi++gEGP5PAbH98gGp4GFWN+EAAAACAAD/3gZyBPoAIAAoAAABBSclISInBgcWFwcmAiYmNTQ2MzIXNjU0JyE1IRUjESEBFhUUBxchEQSe/uyMAVz++cBqM0Sg4q+//2sdVU5cRSkP/mMGcsH+7f4NFBAvAcABaLm/tSApH7a1srABBo9TJ0NXP0tnNzfi4vvoBBhFVT46AQETAAAAAQAA/kAE1AT6ADMAAAEiBhUUFhYXByYCNTQ2NyY1ND4CMzM1ITUhFSMRISIGBhUUFzYzMgQVFAYHJzY3BSclJgJ7hIgweXqyzrU9PWI1Y4Z2+fz+BNTB/fk6NB0iYm/xAQEfGuMnBP7MhAFlPAFraWJHcohptrcBH5NUjDdfgURiQxxz4uL+qxEoGyQiG8OwR5g2T1ZTy7avHQAAAgBL/3IHAQUPAEIATgAAAQYGIyImJwYGFRQWMzMmNTQ2MzIWFRQGBxYXByYnBiMiJjU0NjcmJjU0NjMyFhUUBgcWMzI2NzUhNSEVIxEhNQcnAQE2NjU0JiMiBhUUFgUtc9GDZLNOUFBjYRgDVU5heTAuTEe/aEY3JOH4YnhMTMqysc1CQyo1euZs/twC+MH+7cauAXT81z4/PTg3QjgCfzImHR4xWS4+QhETPUlpSD5NGFpBc3BtBLqdX41LOoxSfqCTfkh6OQM9PZ7i4vvo4cmhATEBXCNLKS82OjEoSAACAAD/lwZXBPoAKwAsAAABISInFhYVFAYHFhcHJicmJjU0NjMyFhc2NjU0Jic3ITUhNSEVIxEhEQcnAQEEg/68ezhFRWJYfnVX4diJe0s0Nmo0PDywsFcDhvt9BlfB/u3MrgF6++kCpwgwfU1ajCM3I7talgZSUzxJMzIMPDBVfyrAkOLi++gBIM+hATb+Xv//AAD+iwbTBPoCJgpXAAABBwmnBDUANwAAAAD//wAA/twHSAT6AiYKWAAAAQcJpwMqAT8AAAAA//8AAP5UBP4E+gImClkAAAEHCacD5QAAAAAAAP//AAD+iwVGBPoCJgpaAAABBwmnBFMANwAAAAD//wAA/dwGPQT6AiYJhgAAACcKEAUJAAABBwvwA0kA6wAAAAD//wAA/m0FowT6AiYKXAAAAQcJpwS3ABkAAAAA//8AAP3cBjEE+gImCYgAAAAnChAFBgAAAQcL8AMsAKMAAAAA//8AAP6QBwkE+gImCl4AAAEHCacErwA8AAAAAP//AAD+GQcTBPoCJgpfAAABBwwkA/P/xQAAAAD//wAA/swHDwT6AiYKYAAAAQcJpwSTAHgAAAAA//8AAP3cBHAE+gImCYwAAAAnChAEhAAAAQcL8AMNAMIAAAAA//8AAP3cBQ8E+gImCY0AAAAnChAEmwAAAQcL8AMVAMMAAAAA//8AAP3cBZ8E+gImCY4AAAAnChAFCQAAAQcL8ANJAOsAAAAA//8AAP3cBO4E+gImCY8AAAAnChAEmAAAAQcL8AMRAMEAAAAA//8AAP6QBn8E+gImCmUAAAEHCacEUgA8AAAAAP//AAD+0ATsBPoCJgpmAAABBwmnA/8AfAAAAAD//wBV/oEF2wUPAiYKZwAAAQcJpwTVAC0AAAAA//8AAP5UBJQE+gImCmgAAAEHCacEhwAAAAAAAP//AFf+VQWbBQ8CJgppAAABBwmnBJkAAQAAAAD//wAA/lQFDwT6AiYKagAAAQcJpwPKAAAAAAAA//8AAP6VBQUE+gImCmsAAAEHCacEIQBBAAAAAP//AAD+lQavBPoCJgpsAAABBwmnBCEAQQAAAAD//wAA/lkFMgT6AiYKbQAAAQcJpwROAAUAAAAA//8AVf5UBk0FDgImCm4AAAEHCacFLAAAAAAAAP//AAD+VAVJBPoCJgpvAAABBwmnBG4AAAAAAAD//wAA/oEFMgT6AiYKcAAAAQcJpwRSAC0AAAAA////4f3cA58E+gImCZ0AAAAnChADxQAAAQcL8AMMAT8AAAAA//8AAP5zBkEE+gImCnIAAAEHCacFAQAfAAAAAP//AAD93AaNBPoCJgmgAAAAJwoQBggAAAEHCacDpQB0AAAAAP//AAD+WQUTBPoCJgp0AAAB
BwmnBD8ABQAAAAD//wAZ/p0GRAUPAiYKdQAAAQcJpwQ8AEkAAAAA//8AAP6VBR4E+gImCnYAAAEHCacEKwBBAAAAAP//AAD/qgZyBPoCJgp3AAABBwvwAyABPwAAAAD//wAA/d4E1AT6AiYKeAAAAQcL8AL1/3MAAAAAAAIAAP+rBXAE+gAnACgAAAERIREBJzcmJjU0NjMyFwcmIyIGFRQWMzI3ESE1IRUhETYzMxUjIgYBBED+7f2fjPRhaPHDh0kVOFpgaVhKkHz80wVf/uFUiVNlR1n9egG//kEBJf6Gv4MspXGkyBHkEExERkmBAgDi4v6eJuIc/tcAAwAA/twF1QT6ACEAOAA5AAAlASclLgM1NDYzMhc2NTQnITUhFSEWFRQGBxYEMzI/AgYGIyImNTQ2MzIXByYjIgYVFBYzMjcBBdX8VIwBUYz5t3NVTlxFKQ/+YwWR/RoUgYh+ARekSjz/NEyZXKS9yK5zRhQ2SkxLST98Zv58uv4iv5cjmLupTENXP0tnNzfi4kVVg8FEi4QIeU8xKaOQkawRxw85NTc5Vv2JAAIAAP/JA5kE+gARABUAAAERFAYjIiYmNTQ2MzMRIzUhFRMBJwEB90JHPJJgSEgV5QMid/3/jAIfBBj91E1NYY47R0ABFeLi/On+yL8BJAAAAgAA/60D+wT6ACYAJwAAAQEnJSYmNTQ3JiY1NDcjNSEVIQYVFBc2MzIXByYjIgYVFBYzMjY3AQP7/P2MAQxqcjJDSRFTA1P+GhhWQUcyNA0VIVNUU0dTlUH+wwGD/iq/kCKTaV5GKXhIRC3i4iEpWh4OCdwDNzMyN0lK/osAAP//AAD82gY9BPoCJgmGAAABBwvYBQkAAAAAAAD//wAA/dwGPgT6AiYLugAAAQcL1wUJAAAAAAAAAAMAAP+nBFYE+gAYABwAHQAAAQEnNyYmNTQ3BTUhMhcHIyIGFRQWMzI2NwE1IRUDBFb9GYz5YGdK/uwCekYxFixpaU9EV4E9/DADmvEBaf4+v4YlillzRAfkBd1FSDlEP0cB9uLi/KL//wAA/dwGMQT6AiYKFwAAAQcKEAUGAAAAAAAAAAQAAP/JBaME+gAYABwAIAAhAAABISInFhYVFAYjIgADNxYWMzI2NTQmJzchJTUhFRMBJwEBBTX+7FoYKyy5lNf+tW/rV7xrNi9bW1cCnvrLBVJR/f+MAh/9HgKmAi9gMnmRAR4BF1fU0SkoMlonwJDi4vzp/si/AST+zwACAAD++wW0BPoAOgA+AAABBgYHBgYHFhcHJicmJjU0NjMyFzMyNjU0IyIHJiY1NDY2MzM1ITUhFSERISIGBhUUFzYzMhcWMzI2NwMnARcFP0nGXAKYg3eNXPzoi3hLNGdmDXZ5jFVgga1PkZuK/aoFXP4N/mk4JxZCXWerZTNBVLpK26YBgXUBrDZBAWmWHDIpw2ahCFFRPEleQjlWJiWuYU1tOXPi4v7DCxYQNCQeWAxIPP05qgESngAAAAMAAP/JBakE+gAgACQAKAAAAQYjIwYGIyIkJzcWFjMyNjU0JiMiBgcnNjMyFhcWMzI3ATUhFQEnARcFO2F6FR/lpM7+wm/pTsByXF5RSS9LNFN+kZjOIhQWcFr6xQVY/oKMAe1uAg8ijI3s7GGpqUlFQEYSF9U7hnkCOAEr4uL7sb8BCasAAP//AAD82gSDBPoCJgmMAAABBwvYBCAAAAAAAAD//wAA/dwEgwT6AiYLvAAAAQcL1wQgAAAAAAAA//8AAPzaBQ8E+gImCY0AAAEHC9gEcwAAAAAAAP//AAD93AUPBPoCJgu9AAABBwvXBHMAAAAAAAD//wAA/NoFnwT6AiYJjgAAAQcL2AUJAAAAAAAA//8AAP3cBZ8E+gImC74AAAEHC9cFCQAAAAAAAP//AAD82gTuBPoCJgmPAAABBwvYBHoAAAAAAAD//wAA/dwE7gT6AiYLvwAAAQcL1wR6AAAAAAAAAAMAAP/JBRkE+gAOABsAHwAAAREUBgYjIiYmNREjNSEVISERFB4CMzI+AjUBAScBA8hXto+TtVmLBKL+FP7nCh82LCc2JQwCY/3/jAIfBBj+lZeiXGS6tgEs4uL+uT1DOR8VNkFO/i7+yL8BJAAAAAACAAAAVwM2BPoAEAAUAAABAScBJiYjIgYHJzY2MzIWFwE1IRUDNv3VjAHuPl8/PndOWGSjUIyya/zKAysBp/6wvwEWQTUlJ9swK4SEAZTi4gAAAAADAFX/rwSVBQ8AJAAlACYAAAEBJyUmJic2NjU0JiMiBhUUFhcHJiY1NDYzMhYVFAYHFjMyNjcDAwSV/OaMARh7pCmpqklDKi1JOzuJmq6Rt9qTjkd9X5pH0YMBj/4gv5YqyJ4jiHRLVScfLicIsxOUdHWCwqqQwjldRE0Crvvf//8AAP3cBU8E+gImCmgAAAEHCbgELwAAAAAAAAADAFf/hARRBQ8ALwAwADEAAAEBJyUmJjU0NyYmNTQ2MzIWFRQHJzY1NCYjIgYVFBYXNjMyFwcmIyIGFRQWMzI2NwETBFH85owBIGtvNmRswJKJqFmcHSokND1pVT9IMjQNFSFTVFNHVJVA/sPLAWb+Hr+aI5JlYEc1p2mCqoBleE5iHCIhJTsyRF4NDgnSAzczMjdKS/6JBEsAAAADAAD/yQOqBPoADQARABUAAAEhFRQGIyImJjU0NjMhATUhFRMBJwEDPP67Q0Q9lF9ISAJs/MQDWVH9/4wCHwIeNU9LYo07R0ABGOLi/On+yL8BJAACAAD/3QO9BPoAFAAVAAABASclJiY1ESM1IRUhERQWFjMyNjcBA739GIwBHmF7iwLe/r8ZQ0Q9hDT+0wGc/kG/kyGvwwFW4uL+u2RVNT01/q0AAAADAAD/3QVNBPoAGAAhACIAAAERIREBJyUuAjURIzUhFSERNjMzFSMiBgERFBYWMzI3EQEEHP7u/cuMARNGZCeLBTz+4FOLU2U9XP1OGUNEa2L++wHE/jwBRP6Zv5Qac5NyAVbi4v6gJOIVAjP+u2RVNVgB2/zsAAAEAAD/oAPDBPoAFAAYACEAIgAAAQEnJSYmNTQ2MzIXByYjIyIHFzY3ATUhFQMDBgYVFBYzMgcDw/0JjAEIbHL0xalUFUNlDQcHvycw/KIDevzQHBxeXi1vAUv+Vb+IK6l0pMgQ2xAB7hovAf7i4v18AQQSOSdNTNgAAAAAAwBV/8kE5wUOACQAKAAsAAABIRUUBiMiJiY1NDYzMzU0JiMiBhUUFhcHJiY1NDYzMhYWFRUhAzUzFRMBJwEEef6mRUs5kF5JRxVCRSwrUU0RsLerjpiyRwFazepR/f+MAh8B3B5NS2GGN0A6t3lkJh8uKwO9DpSAdYNopoi6AVri4vzp/si/ARoAAAACAAD/yQPjBPoAFQAZAAABIRUUBiMiJiY1NDYzMxEjNSEVIREhEwEnAQN1/oJFSjmSXUlIFOUDkv5lAX5u/f+MAh8B3B9MS2KFNz87AVri4v6m/kP+yL8BGgAAAAIAAP+5A+oE+gAbABwAAAEBJyUmJic
2NjU0JyM1IRUhFhUUBgcWFjMyNjcBA+r8/IwBEnWdHnRzLPcDDv7zNWpqIWhQV5I7/sQBj/4qv5Mx46YXX1RFROLiW2NehihGTUhJ/o0AAAD////q/dwDpQT6AiYKKwAAAQcKEAPOAAAAAAAAAAMAAP/eBQ8E+gAgACQAKAAAASIGByc2NyYjIgYVFBYXByYCNTQ2MzIWFzY2MzIWFwcmASEVIQEBJwEEPFR+Jf8TGUZJPEmKs7HfwMeZY51PPZpcNW0pVC/7iwTF+zsFD/5xogHIAomGhE5HNTlCNlGcibaxAQOMna0xNzM0FxTOGwJx4v00/sutAUP//wAA/dwGVgT6AiYKLQAAAQcKEAYIAAAAAAAAAAMAAP+uA6UE+gAXABsAHAAAAQEnNyYmNTQ2MzIXByYjIgYVFBYzMjY3ATUhFQEDpf0njP9pbPDJk0wVPV5saFdSUI08/MADXf6TAUf+Z7+DKaRypsYR5BBMREdIP0ACAuLi/J0AAAAAAwAZAC0EjAUPABoAJgAqAAABAScBJiYnBgUnNjcmJjU0NjMyFhUUBgcWFhclNjY1NCYjIgYVFBYlNTMVBHD9kYwB1liYQFr+5Iy8dFdXzK2zzlNXXcJm/aU9PT04OEE6AimIAa/+fr8BARg0HDeav1Q5QJFQfqCTfliEQh4lB6goSS4wNToxKEy14uIAAAAAAwAA/90DogT6AA4AFgAXAAABASclLgI1ESM1IRUhAQcBFRQWFjMyBwOi/TOMAR5OZiiLA1H+rQFNcP66GkhLSnkBiP5Vv5Mdc5JxAVbi4v5PXQGp4GFWN+EAAAABAAD/3gTuBPoAJQAAAQUnJSEiJwYHFhcHJgImJjU0NjMyFzY1NCchNSEVIRYVFAcXIRUE7v6cjAFc/vnAajNEoOKvv/9rHVVOXEUpD/5jBLv98BQQLwHAAYTVv7UgKR+2tbKwAQaPUydDVz9LZzc34uJFVT46AdIAAAD//wAA/dwFnAT6AiYKeAAAAQcJuAR8AAAAAAAAAAQAS/9yBZoFDwA2AEIARgBKAAABBgYjIiYnBgYVFBYzMyY1NDYzMhYVFAYHFhcHJicGIyImNTQ2NyYmNTQ2MzIWFRQGBxYzMjY3BTY2NTQmIyIGFRQWJTUhFQMnARcFLXPRg2SzTlBQY2EYA1VOYXkwLkxHv2hGNyTh+GJ4TEzKsrHNQkMqNXrmbPzXPj89ODdCOAI+AUHjrgF0bQJ/MiYdHjFZLj5CERM9SWlIPk0YWkFzcG0Eup1fjUs6jFJ+oJN+SHo5Az09NCNLKS82OjEoSLHi4vwAoQExqQAEAAD/lwTpBPoAHwAjACcAKAAAASEiJxYWFRQGBxYXByYnJiY1NDYzMhYXNjY1NCYnNyElNSEVAycBFwUEg/68ezhFRWJYfnVX4diJe0s0Nmo0PDywsFcDhvt9BKDprgF6ZvuDAqcIMH1NWowjNyO7WpYGUlM8STMyDDwwVX8qwJDi4vw5oQE2sfEA//8AAP6LBXAE+gImCp0AAAEHCacENQA3AAAAAP//AAD+3AXVBPoCJgqeAAABBwmnAyoBPwAAAAD//wAA/lQDmQT6AiYKnwAAAQcJpwPlAAAAAAAA//8AAP6LA/sE+gImCqAAAAEHCacEUwA3AAAAAP//AAD82gY9BPoCJgmGAAAAJwvYBQkAAAEHC/ADSQDrAAAAAP//AAD93AY+BPoCJgu6AAAAJwvXBVkAAAEHC/ADRQGvAAAAAP//AAD+bQRWBPoCJgqjAAABBwmnBLcAGQAAAAD//wAA/dwGMQT6AiYKFwAAACcKEAUGAAABBwvwAywAowAAAAD//wAA/pAFowT6AiYKpQAAAQcJpwSvADwAAAAA//8AAP4ZBbQE+gImCqYAAAEHDCQD8//FAAAAAP//AAD+zAWpBPoCJgqnAAABBwmnBJMAeAAAAAD//wAA/NoFDwT6AiYJjAAAACcL2ASsAAABBwvwAw0AwgAAAAD//wAA/dwFDwT6AiYLvAAAACcL1wSsAAABBwvwAwgBxgAAAAD//wAA/NoFRAT6AiYJjQAAACcL2AThAAABBwvwAxUAwwAAAAD//wAA/dwFRAT6AiYLvQAAACcL1wThAAABBwvwAxUBtgAAAAD//wAA/NoFnwT6AiYJjgAAACcL2AUJAAABBwvwA0kA6wAAAAD//wAA/dwFvAT6AiYLvgAAACcL1wVZAAABBwvwA0UBrwAAAAD//wAA/NoFQAT6AiYJjwAAACcL2ATdAAABBwvwAxEAwQAAAAD//wAA/dwFQAT6AiYLvwAAACcL1wTdAAABBwvwAxEBtgAAAAD//wAA/pAFGQT6AiYKsAAAAQcJpwRSADwAAAAA//8AAP7aAzYE+gImCrEAAAEHCacD6wCGAAAAAP//AFX+gQSVBQ8CJgqyAAABBwmnBNUALQAAAAD//wAA/dwFTwT6AiYKaAAAACcJuAQvAAABBwwkA9v/uAAAAAD//wBX/lUEUQUPAiYKtAAAAQcJpwSZAAEAAAAA//8AAP5UA6oE+gImCrUAAAEHCacDygAAAAAAAP//AAD+lQO9BPoCJgq2AAABBwmnBCEAQQAAAAD//wAA/pUFTQT6AiYKtwAAAQcJpwQhAEEAAAAA//8AAP5ZA8ME+gImCrgAAAEHCacETgAFAAAAAP//AFX+VATnBQ4CJgq5AAABBwmnBSwAAAAAAAD//wAA/lQD4wT6AiYKugAAAQcJpwRuAAAAAAAA//8AAP6BA+oE+gImCrsAAAEHCacEUgAtAAAAAP///+r93AOlBPoCJgorAAAAJwoQA84AAAEHCacEMwHRAAAAAP//AAD+cwUPBPoCJgq9AAABBwmnBQEAHwAAAAD//wAA/dwGVgT6AiYKLQAAACcKEAYIAAABBwmnA6UAdAAAAAD//wAA/lkDpQT6AiYKvwAAAQcJpwQ/AAUAAAAA//8AGf6dBIwFDwImCsAAAAEHCacEPABJAAAAAP//AAD+lQOiBPoCJgrBAAABBwmnBCsAQQAAAAD//wAA/6oE7gT6AiYKwgAAAQcL8AMgAT8AAAAA//8AAP3cBZwE+gImCngAAAAnC/AC9f9zAQcJuAR8AAAAAAAAAAIAAP0yBKwE+gBIAEkAAAEmAjU0NjcmNTQ+AjMzNSE1IRUjESEiBgYVFBc2MzIWFRQGByc2NjU0JiMiBhUUFhc2MzIWFRQGIyIkJzcWFjMyNjU0JiMiBzcCJ+XlOTlaNWOGdtH9JgSswf4hOjQdIGVy1++iokpAQGlocn6liR0cor3HrL7+x6O7fNZ8SEs/OklSkv68dAESn0R0L1x8RGJDHHPi4v6rESgbIiMaqIZ8oSbaDS0gLS1XS16UPQOrioamm7mGjIA1MS4xIrMAAAABAAD93ASsBPoARwAABSYmNTQ2NyY1ND4CMzM1ITUhFSMRISIGBhUUFzYzMhYVFAYHJzY2NTQmIyIGFRQWFhc2MzIWFRQGIyIkJzcWFjMyNTQjIgcCC+zCNDVRNWOGdtH9JgSswf4hOjQdHWtv1+97emMpIW
lodnokY1omM6O8wKam/taZn3W6fZN9QU3Hgc9+PmUoWHZEYkMcc+Li/qsRKBshIBaffmSIJbUNIBcpKT42JDpJLgehhYGceYGdal9WURoAAwAA/TIE+AT6AEcASABJAAAFJiY1NDY3JjU0PgIzMzUhNSEVIxEhIgYGFRQXNjMyFhUUBgcnNjY1NCYjIgYVFBYXFgQXByYmIyIGFRQWMzI3FwYjIiY1NCU1AVJ7ejk5WjVjhnbR/SYErMH+ITo0HSBlctfvoqJKQEBpaHJ+ln+pASWmx3DaeEZNPzpJUkZ6cp3CAdfAXtBzRHQvXHxEYkMcc+Li/qsRKBsiIxqohnyhJtoNLSAtLVdLXZQ8CbvmeLShNTEvMCLCLqp/kGs9AAEAAP3cBNEE+gBIAAAFJiY1NDY3JjU0PgIzMzUhNSEVIxEhIgYGFRQXNjMyFhUUBgcnNjY1NCYjIgYVFBYWFxYEFwcmJiMiBhUUFjMyNxcGIyImNTQBI2heNDVRNWOGdtH9JgSswf4hOjQdHWtv1+9zfmcpIWlodnojXVWvATWpx2/beEZNPzpJUkZ6cp7BN06ZVz5lKFh2RGJDHHPi4v6rESgbISAWn35qhS3ADSAXKSk+NiM5RywCsNp4opUtKSgpIsIuoniEAAAAAAEAAP4pBQ0E+gA9AAABIgYVFBYXByYCNTQ2NyY1ND4CMzM1ITUhFSMRISIGBhUUFzYzMgQVFAcjIgYGFRQzMjY3FwYjIiY1NDcmAnuGhoihst6rPT5jNWOGdvn8/gTUwf35OjQdImJv8QEBBwgqfkZ9NGU/T4ynm8fgLAFhZmZjvpW22QEViVeQOF6DRGJDHHPi4v6rESgbJCIbw7A+KRo6LV8gIMdHpoPYSVsAAAAAAQAA/ckFYwT6AEsAAAEiBhUUFhcHJgI1NDY3JjU0PgIzMzUhNSEVIxEhIgYGFRQXNjMyBBUUBwYGFRQzMjY3FwYjIwYVFBYzMjY3FwYjIiY1NDcmNTQ3JgJ7hoaIobLeqz0+YzVjhnb5/P4E1MH9+To0HSJib/EBAQODeX04ZUFJjKcKDzlEN2RDSYynm8cYcNI0AWFmZmO+lbbZARWJV5A4XoNEYkMcc+Li/qsRKBskIhvDsBouBzY1UR8huEcVGScqHiK4R5d2QDNNfsBKQwAAAAACAAD93ASsBPoARgBSAAAFJiY1NDY3JjU0PgIzMzUhNSEVIxEhIgYGFRQXNjMyFhUUBgcnNjY1NCYjIgYVFBYWFzYzMhYVFAYjICc3FhYzMjU0IyIHJTIWFRQGIyImNTQ2AgvswjQ1UTVjhnbR/SYErMH+ITo0HR1rb9fve3pjKSFpaHZ6JGNaJjOjvMCm/vrmhk+WY5N9QU3+gzZJSTY2SkrHgc9+PmUoWHZEYkMcc+Li/qsRKBshIBaffmSIJbUNIBcpKT42JDpJLgehhYGcnKc9OFZRGqhKODhKSjg4SgAAAAACAAD93AUrBPoASgBWAAAFJiY1NDY3JjU0PgIzMzUhNSEVIxEhIgYGFRQXNjMyFhUUBgcnNjY1NCYjIgYVFBYWFzYzMgQXByYmIyIGFRQWMzI3FwYjIiY1NCcyFhUUBiMiJjU0NgFxj4U0NVE1Y4Z20f0mBKzB/iE6NB0da2/X73N+ZykhaWh2eiZjWDAnsAErqcdmzXFASTo2Q01GemyWu682SUk2NkpKbVqyaD5lKFh2RGJDHHPi4v6rESgbISAWn35qhS3ADSAXKSk+NiU7SC0Hsdx4oZYrKygpIsIuoXlZcEo4OEpKODhK//8AAP3eBQ0E+gImCvEAAAEHC/AC9f9zAAAAAP//AAD9yQVjBPoCJgryAAABBwvwAvX/cwAAAAAAAgAA/TIE/AT6AEkASgAAASYCNTQ2NyY1ND4CMyE1ITUhFSMRISIGBhUUFzYzMgQVFAYHJzY3BSclJiMiBhUUFhc2MzIWFRQGIyIkJzcWFjMyNjU0JiMiBzcCJ+XlOTlaNWOGdgEh/NYE/MH90To0HSFoePcBFR8a4yoB/syEAVBIZIaUpYkdHKK9x6y+/seju3zWfEhLPzpJUpL+vHQBEp9EdC9cfERiQxxz4uL+qxEoGyQhGsqpR5g2T1xWy7alHl1PXpQ9A6uKhqabuYaMgDUxLjEiswAAAQAA/dwE/AT6AEgAAAUmJjU0NjcmNTQ+AjMhNSE1IRUjESEiBgYVFBc2MzIEFRQGByc2NwUnJSYjIgYVFBYWFzYzMhYVFAYjIiQnNxYWMzI1NCMiBwIL4M4xMUo1Y4Z2ASH81gT8wf3ROjQdHm6A9wEKHhvnLQH+tnUBVkJrhpEkWVIvPKO8wKam/taZn3W6fZN9QU3Gdt2ON2AoVHJEYkMcc+Li/rwRKBsjHxq6okGRNk9WRpKhghZNRCpBSC0KoYWBnHmBnWpfVlEaAAAAAAMAAP0yBPwE+gBIAEkASgAABSYmNTQ2NyY1ND4CMyE1ITUhFSMRISIGBhUUFzYzMgQVFAYHJzY3BSclJiMiBhUUFhcWBBcHJiYjIgYVFBYzMjcXBiMiJjU0JTUBUXp6OTlaNWOGdgEh/NYE/MH90To0HSFoePcBFR8a4yoB/syEAVBIZIaUln+pASWmx3DaeEZNPzpJUkZ6cp3CAdfAXtBzRHQvXHxEYkMcc+Li/qsRKBskIRrKqUeYNk9cVsu2pR5dT12UPAm75ni0oTUxLzAiwi6qf5BrPQAAAAABAAD93AT8BPoASgAABSYmNTQ2NyY1ND4CMyE1ITUhFSMRISIGBhUUFzYzMgQVFAYHJzY1NQUnJSYjIgYVFBYXMzIEFwcmJiMiBhUUFjMyNxcGIyImNTQBI2NjMTFKNWOGdgEh/NYE/MH90To0HR5ugPUBDB8a5S3+tXUBVT9thpFiXAq6ATiox2/beEZNPzpJUkZ6cp7BN0uiZTdgKFRyRGJDHHPi4v68ESgbIx8axq1HmDZPYFYLk6GBF01EQmEztNl4opUtKSgpIsIuoniEAAAAAAIAAP/eBSME+gArACwAAAEWFwcmACYmNTQ2MzIXNjU0JyE1IRUhFhUVNjMyFhUUBgcnNjU0JiMiBgcGAwHDxsyyt/7mdB9VTmNKbQ3+EQUj/dgUOzuPqkc+72E9NC1KMk15AfTGmradAQ6XVShDV0tHeTkz4uJFVQwWp5VXvFOFclc6OxwlQwLVAAAAAgAA/94G/wT6ADcAOAAAARYXByYAJiY1NDYzMhc2NTQnITUhFSEWFRQHFhc2NjMyBBcHJiYjIgYVFBYzMjcXBiMiJjUmJwYDAcPGzLK3/uZ0H1VOY0ptDf4RBv/7/BQKKzUujV6+AQKTx22iZzpKPTNHPkZqdZGzQFRReAH0xpq2nQEOl1UoQ1dLR3k5M+LiRVUqLwsbOzy5/mzCkzYwLzAdwimqgCQISALX//8AAP3cBMQE+gImCZMAAAEHCawElQAAAAAAAP//AAD93AW5BPoCJgmTAAABBwmtBKgAAAAAAAAABAAA/dwE+AT6ADkAOgA7ADwAABM0Njc1ITUhFSERIyIOAhUUFjMyNyY1N
[... base64-encoded embedded font data elided (binary TrueType payload inside the webpack chunk). Recoverable metadata from the decoded name table: "Noto Sans Bold", Version 1.04, NotoSans-Bold; Copyright 2012 Google Inc. All Rights Reserved; "Noto is a trademark of Google Inc. and may be registered in certain jurisdictions"; designed by the Monotype design team (Monotype Imaging Inc.); http://code.google.com/p/noto/ ; http://www.monotypeimaging.com/ProductsServices/TypeDesignerShowcase ; Licensed under the Apache License, Version 2.0 — http://www.apache.org/licenses/LICENSE-2.0 ...]
TCBLqEvAi8hKiEqgSrhK0EroSzBLSEsASxhLMEtIS2C72Et4S5BLqEvAS9hL8EwITCBMOExQAAQVWBgQAAQWwBgQAAQWw/oQAAQUxBgQAAQUx/oQAAQS6BiwAAQNtBgQAAQNt/oQAAQY7BgQAAQY7/oQAAQfJBgQAAQfJ/oQAAQfTBgQAAQfT/oQAAQMQBgQAAQSTBgQAAQST/oQAAQTVBmgAAQS6BmgAAQT0BmgAAQaaBgQAAQaa/oQAAQVvBgQAAQVv/oQAAQcUBgQAAQcU/oQAAQZaBgQAAQZa/oQAAQVKBgQAAQVK/oQAAQUSBgQAAQUS/oQAAQblBgQAAQbl/oQAAQUbBgQAAQUb/oQAAQaJBgQAAQaJ/oQAAQRkBgQAAQflBgQAAQfl/oQAAQM5BgQAAQM5/oQAAQKkBgQAAQKk/oQAAQhIBgQAAQhI/oQAAQkMBgQAAQkM/oQAAQdvBgQAAQdv/oQAAQYABgQAAQYA/oQAAQS+BgQAAQS+/oQAAQLwBgQAAQTyBgQAAQTy/oQAAQZKBgQAAQZK/oQAAQTsBgQAAQTs/oQAAQcrBgQAAQcr/oQAAQb2BgQAAQb2/oQAAQThBgQAAQTh/oQAAQknBgQAAQkn/oQAAQjyBgQAAQjy/oQAAQee/oQAAQdWBgQAAQdW/oQAAQXLBgQAAQp5BgQAAQp5/oQAAQnTBgQAAQnT/oQAAQj4BgQAAQj4/oQAAQhiBgQAAQhi/oQAAQVoBgQAAQVo/oQAAQSBBgQAAQSB/oQAAQYXBgQAAQYX/oQAAQYhBgQAAQYh/oQAAQYpBgQAAQYp/oQAAQOcBgQAAQOc/oQAAQewBgQAAQew/oQAAQWFBgQAAQWF/oQAAQUZBgQAAQKmBgQAAQKm/oQAAQYzBgQAAQYz/oQAAQT+BgQAAQRmBgQAAQRm/oQAAQaRBgQAAQaR/oQAAQWRBgQAAQWR/oQAAQaq/oQAAQLjBgQAAQLj/oQAAQN7BgQAAQN7/oQAAQM9BgQAAQM9/oQAAQYUBgQAAQYU/oQAAQT0BgQAAQb6BgQAAQb6/oQAAQakBgQAAQak/oQAAQaDBgQAAQaD/oQAAQTXBgQAAQTX/oQAAQVcBgQAAQVc/oQAAQSaBgQAAQSa/oQAAQSmBgQAAQSm/oQAAQZeBgQAAQUdBgQAAQUd/oQAAQTLBgQAAQTL/oQAAQVMBgQAAQVM/oQAAQPTBgQAAQPT/oQAAQPVBgQAAQPV/oQAAQgMBgQAAQgM/oQAAQiJBgQAAQiJ/oQAAQi+BgQAAQi+/oQAAQaHBgQAAQaH/oQAAQUIBgQAAQUI/oQAAQdcBgQAAQdc/oQAAQeyBgQAAQey/oQAAQXDBgQAAQXD/oQAAQVtBgQAAQVt/oQAAQRKBgQAAQRK/oQAAQTZBgQAAQTZ/oQAAQMvBgQAAQMv/oQAAQQxBgQAAQQx/oQAAQSHBgQAAQSH/oQAAQi6BgQAAQi6/oQAAQNCBgQAAQNC/oQAAQVEBgQAAQVE/oQAAQUvBgQAAQUv/oQAAQMZBgQAAQayBgQAAQay/oQAAQQSBgQAAQQS/oQAAQXuBgQAAQXu/oQAAQRxBgQAAQRx/oQAAQVgBgQAAQVg/oQAAQUZ/oQAAQQdBgQAAQQd/oQAAQXsBgQAAQXs/oQAAQR7BgQAAQR7/oQAAQS6BgQAAQS6/oQAAQRk/oQAAQMZ/oQAAQXL/oQAAQYfBgQAAQYf/oQAAQMdBgQAAQMd/oQAAQVQBgQAAQVQ/oQAAQT2BgQAAQT2/oQAAQeLBgQAAQeL/oQAAQfbBgQAAQfb/oQAAQaBBgQAAQaB/oQAAQZe/oQAAQT0/oQAAQUQBgQAAQUQ/oQAAQOiBgQAAQOi/oQAAQRoBgQAAQRo/oQAAQP6BgQAAQP6/oQAAQYMBgQAAQYM/oQAAQUzBgQAAQUz/oQAAQe8BgQAAQe8/oQAAQbZBgQAAQVW/oQAAQSgBgQAAQSg/oQAAQT+/oQAAQSiBgQAAQSi/oQAAQPnBgQAAQPn/oQAAQVCBgQAAQVC/oQAAQbZBmgAAQbZ/oQAAQSNBmgAAQMQ/oQAAQPpBgQAAQPp/oQAAQTBBgQAAQTB/oQAAQJxBgQAAQJx/oQAAQSFBgQAAQSF/oQAAQUGBgQAAQVIBgQAAQVI/oQAAQTVBgQAAQTV/oQAAQN5BgQAAQN5/oQAAQSNBgQAAQSN/oQAAQZWBgQAAQZW/oQABAEAAAEACAABAAwRKAACABYALgACAAEIiwiOAAAABAAAABIAAQASAAAAEgABABIAAQAABEoDMQ3aIc4O4iHODughzg70Ic4O+iHODwYhzg8SIc4PGCHODx4hzg3gIc4PJCHOD7ohzg8wIc4PPCHOD0Ihzg/AIc4PQiHOD8Yhzg9aIc4PiiHOD2Yhzg9sIc4PciHOD3ghzg+EIc4PiiHOD8whzg9OIc4O7iHOD04hzg8AIc4PDCHOD04hzg+WIc4PtCHOD7Qhzg8qIc4PtCHODzYhzg+WIc4PSCHOD04hzg9OIc4PVCHOD2Ahzg/SIc4PliHOD9ghzg+cIc4PfiHOD9ghzg+QIc4N2iHODdohzg3aIc4N2iHODdohzg3aIc4NjCHODughzg76Ic4O+iHODvohzg76Ic4PHiHODx4hzg8eIc4PHiHODvQhzg88Ic4PQiHOD0Ihzg9CIc4PQiHOD0Ihzg9CIc4PZiHOD2Yhzg9mIc4PZiHOD4Qhzg/AIc4MxiHOD8whzg/MIc4PzCHOD8whzg/MIc4PzCHODZIhzg7uIc4PACHODwAhzg8AIc4PACHOD7Qhzg+0Ic4PtCHOD7Qhzg9IIc4PliHOD0ghzg9IIc4PSCHOD0ghzg9IIc4PSCHOD5Yhzg+WIc4PliHOD5Yhzg/YIc4PTiHOD9ghzg3aIc4PzCHODdohzg/MIc4N2iHOD8whzg7oIc4O7iHODughzg7uIc4O6CHODu4hzg7oIc4O7iHODvQhzg9OIc4O9CHODMwhzg76Ic4PACHODvohzg8AIc4O+iHODwAhzg76Ic4PACHODvohzg8AIc4PEiHOD04hzg8SIc4PTiHODxIhzg9OIc4PEiHOD04hzg8YIc4PliHODxghzg+WIc4PHiHOD7Qhzg8eIc4PtCHODx4hzg+0Ic4PHiHOD7Qhzg8eIc4PtCHODoghzg8AIc4N4CHOD7Qhzg8kIc4PKiHODyohzg+6Ic4PtCHOD7ohzg+0Ic4PuiHOD7Qhzg+6Ic4M0iHOD7ohzg+0Ic4PPCHOD5Yhzg88Ic4PliHODzwhzg+WIc4M2CHODzwhzg+WIc4PQiHOD0ghzg9CIc4PSCHOD0Ihzg9IIc4M3iHODOQhzg/GIc4PVCHOD8Yhzg9UIc4PxiHOD1Qhzg9aIc4PYCHOD1ohzg9gIc4PWiHOD2Ahzg9aIc4PYCHOD4ohzg/SIc4PiiHOD9Ihzg+KIc4P0iHOD2Yhzg+WIc4PZiHOD5Yhzg9mIc4PliHOD2Yhzg+WIc4PZiHOD5Yhzg9mIc4PliHOD3Ihzg+cIc4PhCHOD9ghzg
+EIc4PiiHOD5Ahzg+KIc4PkCHOD4ohzg+QIc4PoiHODOohzg9yIc4PnCHOD3Ihzg+cIc4PciHOD5whzg+EIc4P2CHOD7Qhzg+0Ic4PMCHODzYhzg3aIc4PzCHODPAhzgz2Ic4M/CHODhAhzg3aIc4PzCHODdohzg/MIc4N2iHOD8whzg3aIc4PzCHODdohzg/MIc4N2iHOD8whzg3aIc4PzCHODdohzg/MIc4N2iHOD8whzg3aIc4PzCHODdohzg/MIc4N2iHOD8whzg76Ic4PACHODvohzg8AIc4O+iHODwAhzg76Ic4PACHODvohzg8AIc4O+iHODwAhzg76Ic4PACHODvohzg8AIc4PHiHOD7Qhzg8eIc4PtCHOD0Ihzg9IIc4PQiHOD0ghzg9CIc4PSCHOD0Ihzg9IIc4PQiHOD0ghzg9CIc4PSCHOD0Ihzg9IIc4M8CHODPYhzgzwIc4M9iHODPAhzgz2Ic4M8CHODPYhzgzwIc4M9iHOD2Yhzg+WIc4PZiHOD5Yhzgz8Ic4OECHODPwhzg4QIc4M/CHODhAhzgz8Ic4OECHODPwhzg4QIc4PhCHOD9ghzg+EIc4P2CHOD4Qhzg/YIc4PiiHOD9Ihzg9OIc4NAiHODRohzg9OIc4NCCHODQ4hzg7oIc4O6CHODu4hzg70Ic4NFCHODRohzg9OIc4PSCHODvohzg0gIc4OxCHODwYhzg8SIc4PbCHODSYhzg0sIc4PHiHODyQhzg8qIc4NMiHODWghzg04Ic4PPCHOD5Yhzg9CIc4NPiHODUQhzg1KIc4PTiHOD8Yhzg9aIc4PYCHODVAhzg1WIc4P0iHODVwhzg/SIc4PiiHODWIhzg9sIc4PhCHODWghzg+KIc4PkCHODwAhzg8AIc4PfiHODtwhzg9gIc4PTiHODZghzg2eIc4NpCHODW4hzg10Ic4NeiHODYAhzg2GIc4OgiHODdohzg/MIc4PHiHOD7Qhzg9CIc4PSCHOD2Yhzg+WIc4PACHODYwhzg2SIc4PEiHOD04hzg8SIc4PTiHODyQhzg8qIc4PQiHOD0ghzg9CIc4PSCHODwAhzg46Ic4NmCHODZ4hzg2kIc4PEiHOD04hzg2qIc4NsCHODzwhzg+WIc4N2iHOD8whzg3aIc4PzCHODvohzg8AIc4O+iHODwAhzg8eIc4PtCHODx4hzg+0Ic4PQiHOD0ghzg9CIc4PSCHOD8Yhzg9UIc4PxiHOD1Qhzg9mIc4PliHOD2Yhzg+WIc4OxCHODbYhzg8YIc4PliHODbwhzg9OIc4NwiHODnYhzg+KIc4PkCHODdohzg/MIc4O+iHODwAhzg9CIc4PSCHOD4Qhzg/YIc4OBCHODcghzg3OIc4N1CHODdQhzg3aIc4O6CHODu4hzg+6Ic4PiiHOD2Ahzg+QIc4OWCHOD6ghzg7iIc4PZiHOD2whzg76Ic4PACHODeAhzg+0Ic4N5iHOD04hzg/GIc4PVCHOD4Qhzg/YIc4PzCHOD04hzg9OIc4PTiHODu4hzg3sIc4PTiHOD04hzg8AIc4PACHODfIhzg7cIc4O3CHODfghzg52Ic4PtCHOD04hzg9OIc4ORiHOD9ghzg/YIc4PliHOD5Yhzg+WIc4PtCHODrghzg3+Ic4OBCHODgohzg+0Ic4OECHODzYhzg82Ic4PNiHOD5Yhzg+WIc4OiCHOD0ghzg4WIc4OHCHODiIhzg9UIc4PVCHOD1Qhzg9UIc4PVCHODqAhzg6gIc4OKCHODighzg9gIc4PtCHOD7Qhzg+0Ic4PtCHOD9Ihzg/SIc4PliHODsQhzg4uIc4P2CHOD5whzg/YIc4P2CHOD5Ahzg40Ic4OOiHODjohzg5YIc4OWCHODlghzg7uIc4PQiHODkAhzg52Ic4ORiHODkwhzg+0Ic4PKiHODlIhzg9OIc4OWCHODlghzg5eIc4OZCHODmohzg5wIc4OdiHODnwhzg6CIc4OiCHODo4hzg6UIc4OmiHOD5Yhzg+WIc4PTiHOD04hzg8MIc4PNiHOD5Yhzg9OIc4PVCHODqAhzg9gIc4P0iHOD5Ahzg9OIc4OpiHODqwhzg6yIc4PtCHODrghzg9OIc4OviHODsQhzg9OIc4PTiHODwwhzg7KIc4PKiHOD7Qhzg82Ic4PliHOD04hzg9UIc4PYCHODtAhzg/YIc4PfiHOD5Ahzg/MIc4PTiHOD04hzg8AIc4O3CHODtwhzg7WIc4PtCHODu4hzg+0Ic4PliHODtwhzg7iIc4PTiHODuIhzg9OIc4O4iHOD04hzg7oIc4O7iHODvQhzg9OIc4O9CHOD04hzg70Ic4PTiHODvQhzg9OIc4O9CHOD04hzg76Ic4PACHODvohzg8AIc4O+iHODwAhzg76Ic4PACHODvohzg8AIc4PBiHODwwhzg8SIc4PTiHODxghzg+WIc4PGCHOD5Yhzg8YIc4PliHODxghzg+WIc4PGCHOD5Yhzg8eIc4PtCHODx4hzg+0Ic4PJCHODyohzg8kIc4PKiHODyQhzg8qIc4PuiHOD7Qhzg+6Ic4PtCHOD7ohzg+0Ic4PuiHOD7Qhzg8wIc4PNiHODzAhzg82Ic4PPCHOD5Yhzg88Ic4PliHODzwhzg+WIc4PPCHOD5Yhzg9CIc4PSCHOD0Ihzg9IIc4PQiHOD0ghzg9CIc4PSCHOD8Ahzg9OIc4PwCHOD04hzg/GIc4PVCHOD8Yhzg9UIc4PxiHOD1Qhzg/GIc4PVCHOD1ohzg9gIc4PWiHOD2Ahzg9aIc4PYCHOD1ohzg9gIc4PWiHOD2Ahzg+KIc4P0iHOD4ohzg/SIc4PiiHOD9Ihzg+KIc4P0iHOD2Yhzg+WIc4PZiHOD5Yhzg9mIc4PliHOD2Yhzg+WIc4PZiHOD5Yhzg9sIc4P2CHOD2whzg/YIc4PciHOD5whzg9yIc4PnCHOD3ghzg9+Ic4PeCHOD34hzg+EIc4P2CHOD4ohzg+QIc4PiiHOD5Ahzg+KIc4PkCHOD5Yhzg/SIc4PnCHOD9ghzg/MIc4PoiHOD6ghzg+uIc4PuiHOD7Qhzg+6Ic4PwCHOD8Yhzg/MIc4P0iHOD9ghzg/eIc4AAQWwBzAAAQUxBzAAAQNtBzAAAQY7BzAAAQfJBzAAAQfTBzAAAQSTBzAAAQaaBzAAAQVvBzAAAQcUBzAAAQZaBzAAAQVKBzAAAQUSBzAAAQblBzAAAQUbBzAAAQaJBzAAAQflBzAAAQM5BzAAAQKkBzAAAQhIBzAAAQkMBzAAAQdvBzAAAQYABzAAAQS+BzAAAQLwBzAAAQTyBzAAAQZKBzAAAQTsBzAAAQcrBzAAAQb2BzAAAQThBzAAAQknBzAAAQjyBzAAAQeeBzAAAQdWBzAAAQp5BzAAAQnTBzAAAQj4BzAAAQhiBzAAAQVoBzAAAQSBBzAAAQYXBzAAAQYhBzAAAQYpBzAAAQOcBzAAAQewBzAAAQWFBzAAAQKmBzAAAQYzBzAAAQRmBzAAAQaRBzAAAQWRBzAAAQLjBzAAAQN7BzAAAQM9BzAAAQYUB
zAAAQb6BzAAAQakBzAAAQaDBzAAAQTXBzAAAQVcBzAAAQSaBzAAAQSmBzAAAQUdBzAAAQTLBzAAAQVMBzAAAQPTBzAAAQPVBzAAAQgMBzAAAQiJBzAAAQi+BzAAAQaHBzAAAQUIBzAAAQdcBzAAAQeyBzAAAQXDBzAAAQVtBzAAAQRKBzAAAQTZBzAAAQMvBzAAAQQxBzAAAQSHBzAAAQi6BzAAAQNCBzAAAQVEBzAAAQUvBzAAAQayBzAAAQQSBzAAAQXuBzAAAQRxBzAAAQVgBzAAAQUZBzAAAQQdBzAAAQXsBzAAAQR7BzAAAQS6BzAAAQRkBzAAAQMZBzAAAQXLBzAAAQYfBzAAAQMdBzAAAQVQBzAAAQT2BzAAAQeLBzAAAQfbBzAAAQaBBzAAAQZeBzAAAQT0BzAAAQUQBzAAAQOiBzAAAQRoBzAAAQP6BzAAAQYMBzAAAQUzBzAAAQe8BzAAAQVWBzAAAQSgBzAAAQT+BzAAAQSiBzAAAQPnBzAAAQVCBzAAAQbZBzAAAQMQBzAAAQPpBzAAAQTBBzAAAQJxBzAAAQSFBzAAAQUGBzAAAQVIBzAAAQTVBzAAAQN5BzAAAQSNBzAAAQZWBzAABAAAAAEACAABAAwAEgABAFoAZgABAAEFBgABACIAJAAmACgAKgAsAC4ALwAxADIANQA2ADcAOABEAEYASABKAEwATgBPAFEAUgBVAFYAVwBYAJAAnACdAK8AsAC8AL0A/AABAAAABgABAAAAAAAiAEYATA2mCwwAfAmqDaYNTAe4CnwAUgnOCbYAdgBYAF4AZACICRQAiABqCRQAiABwAHYAggB8CbYJtgCIAIgAggCCAIgAAQKsAAAAAQLbAAAAAQIwAAAAAQJBAAAAAQJiAAAAAQJs/jQAAQKyAAAAAQHqAAAAAQIIAAAAAQGQAAAAAQJEAAAAAQFAAAAABAAAAAEACAABAAwAEgABAKwAuAABAAEFBwACABkAJAA9AAAARABdABoAggCYADQAmgC4AEsAugFCAGoB+gIBAPMCNwI3APsCTgJOAPwCUwJWAP0CXAJfAQEDGgNxAQUDfgOzAV0DtwO6AZMDvwPPAZcD2APYAagD3QQZAakEHgQfAeYEIgSeAegF9AYiAmUGVQbmApQIAwgDAyYIUQhRAycIYghoAygIcwhzAy8Idgh2AzAAAQAAAAYAAQCdAAADMQZkCkgILApsDEAMRgw0CtgK6ghECvAMQAsUCyAMCgxGBmoMTAtiC3oLyAwuC+AL5gvyB2YMUgpOCFwKcgqiCswK0gsmDDoJOgr2DDoLGgsmC0QLSglMDDoLaAxYC84MXgwiC+wMKAv+C+YL5gvmC+YL5gvmBnAGdgxADEAMQAxACuoK6grqCuoKbAsgDAoMCgwKDAoMCgwKC8gLyAvIC8gL8gxGBnwMUgxSDFIMUgxSDFIGggdsCqIKogqiCqIMOgw6DDoMOgtECyYLRAtEC0QLRAtEC0QLzgvOC84LzgwoC0oMKAvmDFIL5gxSBogGjggsCFwILAhcCCwIXAgsCFwKbApyCmwKcgxACqIMQAqiDEAKogs4CzgMQAqiDDQK0gw0CtIMNArSBpQK0grYCyYK2AsmCuoMOgrqDDoK6gw6BpoGoArqDDoGpgpgCEQJOgrwCvYK9gxADDoGrAayDEAMOgxADDoMQAw6CyALJgsgCyYLIAsmBrgGvgbEDAoLRAwKC0QMCgtECJ4GygxMDDoMTAbQDEwMOgtiC2gLYgtoBtYG3AtiC2gKrgbcC3oMWAt6DFgLyAvOC8gLzgvIC84LyAvOC8gLzgs4BuIL4AwiC/IMKAvyB2YL/gdmB+QHZgfkBugHHgvgDCIL4AwiC+AMIgvyDCgJOgk6CxQLGgvmDFIMCgtEC8gLzgvmDFIL5gxSC+YMUgvmDFIL5gxSC+YMUgvmDFIL5gxSC+YMUgvmDFIL5gxSC+YMUgqcCqIKnAqiCpwKogqcCqIKnAqiCpwKogqcCqIKnAqiCuoMOgbuCvwHAAb0DAoLRAwKC0QMCgtEDAoLRAwKC0QHAAb6DAoLRAwKC0QMCgtEDAoLRAcABwYHDAvOC8gLzgvIC84LyAvOC8gLzgvIC84HDAvOBxIMKAvyDCgL8gwoBxgHHgpOByQKSApOCk4KTgcqCCwIXApsBzALRApyB8YJvgc2C/4J7gw0BzwHQgtoCuoK8Ar2DDoHSAdOCyAIdAwKB1QHWgxeC0oHYAtiCGIHZgdsB2wHcgxYB3gMCgwuC/IMKAv4C/4HfgqiB4QMWAeKC0oHzAfSB9gHkAeWB5wHogeoB64L5gxSCuoMOgwKC0QLyAvOC3oHtAe6DDQK0gw0CtIK8Ar2B8AKkAfACpAHxgkcB8wH0gfYDDQK0gfeC0oLIAsmC+YMUgvmDFIKnAqiCpwKogrqDDoK6gw6DAoLRAwKC0QMTAw6DEwMOgvIC84LyAvOB+QH6grYCyYH8Af2C8gKogf8CAIL5gxSCAgIDgwKC0QL8gwoCBQIGgxYCCAIJgvmCCwIXAxAC3oIMguqCDgIOApIC8gIPgqcCqIIRAk6CEoIVgxMDDoL8gwoCFwKlgpOCk4IUAhcCFYKlgxeDF4MXghcCGIIYgm+CToK0grSCGgIbgxeCHQLJgh6DDoIgArqCIYIjAiSCJgIngikCKoLJgiwCLYLRAi8CMIIyAjOCM4JoAtKCNQMOgjaCOAMXgjmCOwI7AjyCPgI/gpmC84LRAtEC+wJBAvsCQoJEAkWCRwJHAlSCVgJIgmsDAoJKAkuDC4JNAk6CUAJRglMCVIJWAleCWQJaglwCXYJfAmCC/gJiAvICY4JlAmaCk4KcgrMCxoLJgtKDDoMOgtoDFgL/gmgCaYJrAmyDDoJuAtKCb4JvgnECcoJ0ArSCdYKMAncCeILSgowCegJ7gn0CfoKAAoGCgwKEgoYCh4KQgokCioKMAo2CjwKQgpICk4KeApUCn4KWgpgCmYKbApyCngKlgp+CoQKigqWCpAKlgqcCqIKnAqiCqgKrgq0CroKwArGDEYKzAw0CtIK2AsmCtgLJgrYCyYK2AsmCtgLJgreCuQK6gw6CvAK9grwCvYLVgsyDBAK/AwQCvwMEAsCCwgLDgsUCxoLFAsaCyALJgsgCyYLLAsyCzgLPgwKC0QMCgtEDAoLRAwKC0QMRgtKDEYLSgxMDDoMTAtQDEwLUAtWC1wLYgtoC24LdAtiC2gLYgtoC24LdAt6DFgLgAuGC4wLkguYC54LpAuqC7ALtgu8C8ILyAvOC8gLzgwuDF4L1AvaC+AMIgvgDCIL5gvsC+YL7AvyDCgL+Av+DAQMCgwQDBYMHAxYDCIMKAxSDDoMLgw0DEAMOgxADEYMTAxSDFgMXgxkAAEFQwAAAAEDNAAAAAEG/QAAAAEDXP5IAAEEQgAAAAEF0gAAAAEE7P5wAAEEE/5wAAEFJwA/AAECKf5wAAEBs/5wAAEEOP6sAAED1AAAAAEBqAAAAAEFbgAAAAEFFP6sAAED1P5IAAEGNgAAAAEBqf5wAAECqP5wAAEC
bP5wAAEEZv5wAAEBYgAAAAECAf7KAAEC7v7KAAEC5P7KAAEDmP7KAAEC+P7KAAEDcP7KAAEC8f7KAAECiv5IAAECCP5IAAEEEAAFAAECvAAFAAEEGgAUAAED3gAAAAEDDP5IAAEFqgAAAAEEdAAAAAEFUAAAAAEINgAAAAEGrf4UAAEFAP8zAAEEBgAAAAECqP5IAAEDEwAAAAEDU/5IAAECrgAAAAECowAAAAECTwAAAAEFif6sAAEFif5IAAEDdf5IAAEHhf6sAAEHhf5IAAEGYv5IAAEG8wAAAAEFkQAAAAEDyv5wAAEClAAAAAEKHQAAAAEJbgAAAAEIkgAAAAEGTgAAAAEC5AAAAAECdv5IAAEFPf4UAAEE2P5IAAEBngAAAAEBigAAAAECzf5IAAEC5f5IAAECcAAAAAEFLwAAAAEF8AAUAAEGBAAUAAEDegAAAAEC7f5IAAECBwAAAAEEsAAAAAEBBP6sAAEFtP5IAAECJgAAAAEExP5IAAEC0AAAAAECigAAAAEDFgAAAAECvP5IAAEEgP4UAAED6P5IAAECTgAAAAECLgAAAAECTQAAAAECHP5IAAED/P5IAAEHCAAAAAEHGv4UAAEGaP5IAAEE7P5IAAEExAAAAAEGQAAAAAEE9gAAAAEDqf4UAAEC2gAAAAECK/5IAAECbv4UAAEEfgAAAAEB4P5IAAEBHP5IAAECRP5IAAEA1/5IAAECSgAAAAEGgwAAAAECtwAAAAEETP5IAAEDrAAUAAEClP5IAAECLQAAAAEC3QAAAAEC7gAAAAEEhAAAAAEBBP5IAAEELv4UAAEDYwAAAAEESf4UAAECBQAAAAECsQAAAAEHbQAAAAEGlf5IAAEHwQAUAAEE7AAUAAEDsv5IAAEF3QAAAAEGSf5IAAEEzQAAAAEEJQAAAAEEev4UAAEFAP5IAAEDSP5IAAEDgQK0AAEC0P5IAAEH+QAAAAECWAAAAAEDIAAAAAED1P6EAAEEfv6EAAEB9P6EAAEEYP6EAAEHRP6EAAEEsP6EAAEC8P6EAAEBLP5IAAEDrP6EAAEEEP6EAAEDPv6EAAEFBv6EAAEFRv6EAAEEzv5IAAEEiP6EAAEEnP6EAAEF3f6EAAECqP6EAAEBzP6EAAEBQP6EAAEFZP6EAAEBpP6EAAEDFgAFAAEDXAAAAAEDIP7KAAEC0P7oAAEDNP5IAAECgP5IAAEDIAAUAAECsgAUAAEDFv7KAAEDFv7oAAEC7v7oAAEC5P5wAAEDDP5wAAEEPQAAAAED2gAAAAEC+AAAAAECsv5wAAEC0P5wAAECsv6iAAECxv6iAAEC2v5wAAEC7v5wAAEB0AAAAAEC+P5IAAEFPgAAAAECAf6iAAEBqf6iAAECAQAAAAEE9AAAAAEEnAAAAAEBqf7KAAEBqf7oAAECvP5wAAEBkP5wAAEGrQAAAAEHGgAAAAEFgAAAAAEEgAAAAAEDrP7oAAEDDP7oAAEDrP5wAAEDAv5wAAEDDAAAAAEBqf4UAAEBpP7KAAEDcP7oAAEBzP7oAAECqAAAAAECbAAAAAECqP7KAAECbP7KAAECxAAAAAECxP7KAAECgP7KAAECxP7oAAECgP7oAAECrf5wAAECYv5wAAEDcP5cAAEDJv5IAAEDcP6iAAEDJv6iAAEDcP5wAAEC+P5wAAEDcAAAAAEEegAAAAEDAv7KAAECvP7KAAEGDgAAAAEE7AAAAAEETAAAAAEC8QAAAAEEOAAAAAEDSAAAAAEEagAAAAEDmAAAAAEC+P7oAAECdv7oAAEDNP7oAAEFWgAAAAEBpP5IAAEDAgAAAAEDwAAAAAEBqQAAAAEDrAAAAAEBwQAAAAEFAAAAAAEEEwAAAAECgAAAAAECvAAAAAEDiwAAAAQAAAABAAgAAQAMABQAAQA2AEwAAQACBQEFAgACAAUARQBHAAAASQBLAAMATQBRAAYAUwBaAAsAXQBdABMAAgAAAAoAAAAQAAEAAABWAAEAogBWABQAWgAqADAANgA8AFQAQgBIAGYATgBUAFoAYABmAGwAcgB4AH4AhACKAAEDugBWAAEEcQBWAAECAgBWAAEDFv5wAAEBaP6EAAEE9v/sAAEHQgBWAAEEqABWAAEEEABWAAEEcf7UAAEB0QBWAAEDIABWAAEDNwBWAAEEogBWAAEC4wBWAAEFgwBWAAEDqgBWAAYCAAABAAgAAQAMAAwAAQBwAfwAAgAQAmACYgAAAowCjwADA3MDcwAHBOQE9AAIBPoE+gAZBRwFHgAaBSEFIwAdBSUFJQAgBSkFKwAhBS8FMQAkBTYFNgAnBToFOgAoBUIFTgApBkgGSQA2BksGUQA4BlMGUwA/AEAAAAECAAABGgAAARoAAAEIAAABDgAAARQAAAEUAAABGgAAAVYAAAFWAAABIAAAAVYAAAFWAAABVgAAASYAAAFWAAABLAAAAVYAAAEyAAABMgAAAVYAAAFWAAABOAAAATgAAAE4AAABYgAAAT4AAAFQAAABRAAAAVYAAAFKAAABegAAAVYAAAFQAAABUAAAAVYAAAGGAAABhgAAAVwAAAGGAAABYgAAAWgAAAFoAAABaAAAAWgAAAFoAAABaAAAAWgAAAFoAAABaAAAAWgAAAFoAAABaAAAAWgAAAFuAAABbgAAAXQAAAF6AAABegAAAXoAAAF6AAABgAAAAYAAAAGGAAH9eAS4AAECPASIAAECeAS4AAECTwS4AAH9fgS4AAEAAAXSAAH9jgS4AAH/sAS4AAEAAASQAAEAAAOiAAEAAAR0AAEAAAXwAAEAAARUAAEAAARgAAEAAAS4AAEAAATEAAEAAASkAAEAAAH+AAEAAATYAAEAAASmAAEAAASwAAEAAAScAAEAAASaAEAAmgCaAIIAiACOAJQAlACaAO4AoACyAO4A7gDEAKYArADuAO4A7gDuALIBAADKAMoAygC4AOgA3AC+AMQAygDQAOgA7gDWANwBDAD6AOIA+gDoAPQA9ADuAPQA9AD0AQYBBgD0APQA9AD0APQA+gD6AQABBgEGAQYBBgEGAQYBDAAB/X4GVAABAjwGBAABAngGaAABAk8GpAAB/X4GaAABAAAGBAAB/ZIHCAABAAAG9AABAAAHCAABAAAG4AABAAAH+AABAAAGVAABAAAGGAABAAAGzAABAAAHvAABAAAHMAABAAAHHAABAAAGkAABAAAGaAABAAAFZAABAAAGuAABAAAGfAABAAAGQAABAAAGpAAGAwAAAQAIAAEADAAMAAEAZAFsAAIADgJkAmQAAAT2BPkAAQT8BQAABQUDBQUACgUIBRIADQUYBRsAGAUkBSQAHAUmBSgAHQUsBS0AIAUyBTUAIgU4BTkAJgZKBkoAKAZSBlIAKQZUBlQAKgArAAAArgAAAQIAAAECAAAA8AAAAPAAAAC0AAABAgAAAQIAAADwAAAAxgAAALoAAAD8AAAA2AAAAPYAAADAAAAA2AAAAQIAAAD2AAAA2AAAANgAAADYAAAAxgAAAMwAAADeAAAA0gAAAPYAAAD8AAAA2AAAANgAAAD
eAAAA5AAAAOoAAAD2AAAA8AAAAPAAAAD2AAAA9gAAAPYAAAD2AAAA8AAAAPYAAAD8AAABAgAB/X//fgABABT/zgABAAD/YAABAAD/pgABAAD/dAABAAD/VgAB/+z/2AABAAD/iAABAAD/2AABAAD/ugABAAD/kgABAAD/xAABAAD/sAABAAD/zgABAAD/nAArAFgAagBqAKAAoABeAJQAlACIAHAAZACaAKAAlACUAHAAagBqAJQAagBqAHwAcACIAHYAoACaAHwAggCIAJQAmgCUAI4AlACaAJoAmgCaAJoAlACaAKAAAf1//fgAAQAU/bIAAQAA/hYAAQAA/fgAAQAA/jQAAf/s/bIAAQAA/iAAAQAA/bIAAQAA/cYAAQAA/agAAQAA/eQAAQAA/bwAAQAA/dAAAgAIAAIACgDaAAEAJAAEAAAADQBCAEgAZgBsAIoAkACWAKQAugC6AMAAwADGAAEADQApANEA8AEAAWABZAFvAXMBgwGUApoCrALJAAEAIgAUAAcAIgBQAEUAHgBLAB4ATgAeAE8AHgDnAB4A6QA8AAEALQAyAAcAIgBGAEUAHgBLAB4ATgAeAE8AHgDnAB4A6QBGAAEBdP/sAAEBc//iAAMBZP/2AXT/7AGI/+wABQFk/+IBcP/2AXH/2AF0//YBiP/2AAEBgf/sAAEBowAyAAIBowAyAb7/7AABA9gABAAAAecVEhUSB7QYoBTEGKAzjjTeCEA03jSSBzAIQAnGNN4HujTeOcI1+hHuEe4IQDa2DFYHtDSANJg87DSYB6I0gAjWNIA0gDSYNJgLADzsOZw5nAjWOZwHtDOOM44zjjOOM44zjjSSCEA0kjSSNJI0kjTeNN403jTeNN403jTeNfo1+jX6Nfo2tge6NIA0gDSANIA0gDSANJg0mDSYNJg0mDSYNJg0mA+6NJg5nDSYOZwzjjSAM440gDOONIAIQAhACEAIQDTeCag03jSSNJg0kjSYNJI0mDSSNJg0kjSYNIAIQAjWCNYJxgnGCcYJqAnGCcY0gDTeNN403jSSCwALAAsAOcI87DnCPOw5wjX6Nfo1+jX6Nfo1+hHuOZw2tjmcNrYMVgxWDFYzjjSANJI03g0YPP4a4hq+D7oNGAzsDWwNGA0OPP4OUA0YPP4NSg1sGuIOIg5QDmYavhriDpA0gA7eDpAPVA6WNIAOzA7eDvgPFg9UD8wPzA9qD8wPfA+6D8wRFCkSIE4P5hEUERQRFC+MJ6okvA/wJfApEjN8L4wl8C+MJjod0iBOKRInqhA2L4wzfDN8ERQRFCY6Jjo0gCYaK4g0gDSAJXomGiV6JzAnMCuIKOQnMCV6NIA0gB00HTQnMCcwNIAmICuIHTQdNCYgJXoo5CkSK4gR7jmcEe45nBHuOZw2tjmcFMQUxBTEFRIVEhigFRIYoBq+GtAa4jOONIA1sDZsNIAmOijkHLodNCBOJLwbeCS8G3gcThxOJfAcWCcwJjonMCeqKOQnqijkKOQo5CY6JzAmOicwJjoo5CBOM3w0gBy6HTQd0icwLB4ucB5YHu4u9jAaJfAmGi72MBovjCV6L4wlei+MJXozfDSALB4riB8IH64gFCcwIE4pEiuIIMAiziDAIs4u9jAaM3w0gDN8NIAi/COmI/Q0gCP0NIAvjCV6JBYzZjN8NIAzfDSAM3w0gCS8NIAkvDSANIA0gCY6JzAmOicwL4wleiXwJhomICY6JzAmOicwJjonMCY6JzAnqijkJ6oo5CeqKOQpEiuILB4ucC72MBovjDAaMJAy7DCQMuwzfDSAMJAy7DCQMuwxkjHwMj4y7DNmM3w0gDOONIAzjjSAM440gDOONIAzjjSAM440gDOONIAzjjSAM440gDOONIAzjjSAM440gDSSNJg0kjSYNJI0mDSSNJg0kjSYNJI0mDSSNJg0kjSYNN40mDTeNJg03jTeNN403jTeNbA1sDWwNbA1sDX6Nmw2bDZsNmw2bDa2OZw2tjmcNrY5nDnCPOw8/gACAI4ABQAFAAAACgALAAEADwARAAMAJAApAAYALgAvAAwAMgA0AA4ANwA+ABEARABGABkASABJABwASwBLAB4ATgBOAB8AUABTACAAVQBVACQAVwBXACUAWQBcACYAXgBeACoAggCNACsAkgCSADcAlACYADgAmgCgAD0AogCnAEQAqgCtAEoAsgCyAE4AtAC2AE8AuAC4AFIAugC6AFMAvwDIAFQAygDKAF4AzADMAF8AzgDOAGAA0ADSAGEA1ADdAGQA5wDnAG4A+AD7AG8A/QD9AHMA/wEBAHQBAwEDAHcBCAEIAHgBDgEOAHkBEAEQAHoBEgESAHsBFAEUAHwBFwEXAH0BGQEZAH4BGwEbAH8BJAEoAIABKgEqAIUBLAEsAIYBLgEuAIcBMAEwAIgBMgEyAIkBNAE0AIoBNgE7AIsBPQE9AJEBPwE/AJIBQwFFAJMBRwFHAJYBVgFWAJcBWwFiAJgBZAFkAKABZgFmAKEBaAFpAKIBbQFtAKQBbwFvAKUBcQF2AKYBeAF5AKwBewF8AK4BfgF+ALABgAGAALEBgwGIALIBigGKALgBjAGMALkBjgGOALoBkAGQALsBkwGUALwBlwGXAL4BmQGZAL8BnQGgAMABpAGoAMQBqgGuAMkBsAGxAM4BtAG0ANABuAG4ANEBugHAANIBwwHEANkBxgHIANsBygHKAN4BzAHRAN8B1AHUAOUB2AHYAOYB2gHaAOcB3AHgAOgB4wHkAO0B5gHoAO8B6gHsAPIB8gH2APUB+AIEAPoCBgIIAQcCCgIKAQoCDAIMAQsCIQIhAQwCUAJRAQ0CVQJWAQ8CXQJdARECXwJfARICZwJnARMCaQJtARQCbwJzARkCdQJ1AR4CdwJ3AR8CeQKJASACkgKwATECsgK9AVACwALFAVwCxwLMAWICzwLQAWgC0wLUAWoC1gLZAWwC2wLbAXAC3QLmAXEC7AL5AXsC/AL9AYkDAAMFAYsDCAMWAZEDGANBAaADRgNKAcoDTANMAc8DTgNOAdADUANQAdEDUgNSAdIDVQNVAdMDVwNXAdQDWQNZAdUDWwNbAdYDXQNeAdcDYwNjAdkDZQNlAdoDZwNnAdsDaQNpAdwDawNxAd0DfgN/AeQHLwcvAeYAHAAP/8QAEf/EACT/7ACC/+wAg//sAIT/7ACF/+wAhv/sAIf/7ADC/+wAxP/sAMb/7AFD/+wCCP/EAgz/xAJV/+wDGv/sAxz/7AMe/+wDIP/sAyL/7AMk/+wDJv/sAyj/7AMq/+wDLP/sAy7/7AMw/+wABAAFADwACgA8AgcAPAILADwAAQAtAFoAIQAP/34AEf9+ACT/zgA7/+wAPf/2AIL/zgCD/84AhP/OAIX/zgCG/84Ah//OAML/zgDE/84Axv/OATv/9gE9//YBP//2AUP/zgII/34CDP9+AlX/zgMa/84DHP/OAx7/zgMg/84DIv/OAyT/zgMm/84DKP/OAyr/zgMs/84DLv/OAzD/zgAlACb/7AAq/+wAMv/sADT/7ACJ/+wAlP/sAJ
X/7ACW/+wAl//sAJj/7ACa/+wAyP/sAMr/7ADM/+wAzv/sAN7/7ADg/+wA4v/sAOT/7AEO/+wBEP/sARL/7AEU/+wBR//sAlz/7ANG/+wDSP/sA0r/7ANM/+wDTv/sA1D/7ANS/+wDVP/sA1b/7ANY/+wDWv/sA1z/7AA0AEb/7ABH/+wASP/sAFL/7ABU/+wAov/sAKn/7ACq/+wAq//sAKz/7ACt/+wAtP/sALX/7AC2/+wAt//sALj/7AC6/+wAyf/sAMv/7ADN/+wAz//sANH/7ADT/+wA1f/sANf/7ADZ/+wA2//sAN3/7AEP/+wBEf/sARP/7AEV/+wBSP/sAl3/7AMz/+wDNf/sAzf/7AM5/+wDPf/sAz//7ANB/+wDR//sA0n/7ANL/+wDT//sA1H/7ANT/+wDVf/sA1f/7ANZ/+wDW//sA13/7AAHAAUAKAAKACgADABGAEAARgBgAEYCBwAoAgsAKABOAAX/sAAK/7AAJv/sACr/7AAy/+wANP/sADf/7AA4//YAOf/sADr/7AA8/+IAif/sAJT/7ACV/+wAlv/sAJf/7ACY/+wAmv/sAJv/9gCc//YAnf/2AJ7/9gCf/+IAyP/sAMr/7ADM/+wAzv/sAN7/7ADg/+wA4v/sAOT/7AEO/+wBEP/sARL/7AEU/+wBJP/sASb/7AEq//YBLP/2AS7/9gEw//YBMv/2ATT/9gE2/+wBOP/iATr/4gFH/+wB+v/sAfz/7AH+/+wCAP/iAgf/sAIL/7ACXP/sAl7/9gNG/+wDSP/sA0r/7ANM/+wDTv/sA1D/7ANS/+wDVP/sA1b/7ANY/+wDWv/sA1z/7ANe//YDYP/2A2L/9gNk//YDZv/2A2j/9gNq//YDbP/iA27/4gNw/+IDfv/sAFUABQAoAAoAKABE/+wARv/sAEf/7ABI/+wASv/2AFL/7ABU/+wAov/sAKP/7ACk/+wApf/sAKb/7ACn/+wAqP/sAKn/7ACq/+wAq//sAKz/7ACt/+wAtP/sALX/7AC2/+wAt//sALj/7AC6/+wAw//sAMX/7ADH/+wAyf/sAMv/7ADN/+wAz//sANH/7ADT/+wA1f/sANf/7ADZ/+wA2//sAN3/7ADf//YA4f/2AOP/9gDl//YBD//sARH/7AET/+wBFf/sAUT/7AFG/+wBSP/sAgcAKAILACgCVv/sAl3/7AMb/+wDHf/sAx//7AMj/+wDJf/sAyf/7AMp/+wDK//sAy3/7AMv/+wDMf/sAzP/7AM1/+wDN//sAzn/7AM9/+wDP//sA0H/7ANH/+wDSf/sA0v/7ANP/+wDUf/sA1P/7ANV/+wDV//sA1n/7ANb/+wDXf/sACUAJv/2ACr/9gAy//YANP/2AIn/9gCU//YAlf/2AJb/9gCX//YAmP/2AJr/9gDI//YAyv/2AMz/9gDO//YA3v/2AOD/9gDi//YA5P/2AQ7/9gEQ//YBEv/2ART/9gFH//YCXP/2A0b/9gNI//YDSv/2A0z/9gNO//YDUP/2A1L/9gNU//YDVv/2A1j/9gNa//YDXP/2AAgAD//YABH/2AFW/+wBX//sAWL/7AFp/+wCCP/YAgz/2AACAWb/9gFt//YADAAF/7oACv+6AWb/7AFt/+wBcf+6AXL/xAFz/+wBdf/YAXj/xAIH/7oCC/+6AlH/xAAIAA//fgAR/34BVv/OAV//zgFi/84Baf/OAgj/fgIM/34ALQAP/8QAEP/YABH/xAFW/7ABX/+wAWL/sAFm/+IBaf+wAW3/4gFz/84Bdv/iAXn/ugF6/84Be//OAXz/2AF9/84Bfv+6AYD/7AGB/+IBgv/OAYT/zgGG/9gBh//OAYn/zgGK/+wBjP+6AY7/zgGP/7oBkP+6AZL/zgGT/7oBlP/sAZX/zgGW/84BmP/OAZn/ugGa/84Bm//OAgL/2AID/9gCBP/YAgj/xAIM/8QCIf/iAlD/7AALAA//zgAR/84BVv/sAV//7AFi/+wBaf/sAXL/4gF4/+ICCP/OAgz/zgJR/+IABQFm/+wBbf/sAXP/4gGN//YBkf/2AAoAD//EABH/xAFW/9gBX//YAWL/2AFm//YBaf/YAW3/9gII/8QCDP/EAAEBiAAUAA0AEP/OAXn/7AF+/+wBjP/sAY3/7AGP/+wBkP/sAZH/7AGT/+wBmf/sAgL/zgID/84CBP/OAAQAD//sABH/7AII/+wCDP/sAAYABf/YAAr/2AGN//YBkf/2Agf/2AIL/9gABwF5/+wBfv/sAYz/7AGP/+wBkP/sAZP/7AGZ/+wADwAF/8QACv/EAXn/9gF+//YBgP/sAYr/7AGM//YBjf/sAY//9gGQ//YBkf/sAZP/9gGZ//YCB//EAgv/xAAFAA//2AAR/9gBiP/2Agj/2AIM/9gABAAP//YAEf/2Agj/9gIM//YADwAP/+IAEP/sABH/4gF5/+wBfv/sAYz/7AGP/+wBkP/sAZP/7AGZ/+wCAv/sAgP/7AIE/+wCCP/iAgz/4gAEAAX/7AAK/+wCB//sAgv/7AAGAAX/9gAK//YBgP/sAYr/7AIH//YCC//2AAIDC//sAw3/7AARAAX/7AAK/+wBqv/2AcH/7AIH/+wCC//sAm//9gJ5/+wCvP/sAr7/7ALC/+wCxP/sAtH/7ALW//YC2P/2Atr/9gL6/+wANwAP/9gAEf/YAZ3/7AGk/+wBpv/sAaj/4gGq/+wBrv/sAbD/7AGx/+wBtf/sAbz/4gG9/+IBv//sAcT/7AHH/+wBzv/2AdX/9gHy//YCCP/YAgz/2AJv/+wCcP/2Anf/7AJ9//YCf//2Apz/7AKe/+wCpv/sArL/4gK0/+ICtv/iArj/7AK6/+wCx//sAsv/7ALM//YC1v/sAtj/7ALa/+wC4v/sAuT/7ALy/+wC9P/iAvb/4gL4/+IDAv/sAwT/7AMK/+wDDP/sAw7/7AMP//YDFP/sAxj/7AMZ//YANgAF/9gACv/YAZ3/xAGm/8QBqP/sAbz/zgG9/+wBwf/OAcT/xAHc/+wB3f/sAeH/7AHk/+wB9v/sAgf/2AIL/9gCa//YAnn/zgJ9/9gCf//YApT/2AKY/9gCpP/YAqb/xAKn/+wCsv/OArP/7AK0/84Ctf/sArb/zgK3/+wCuv/EArv/7AK8/84Cvf/sAr7/zgK//+wC0f/OAtL/7AL0/+wC9f/sAvb/7AL3/+wC+P/sAvn/7AL6/84C+//sAwD/2AMK/84DC//iAwz/zgMN/+IDFP/EAxX/7AC1AA//zgAR/84AIgAUACT/2AAm//YAKv/2ADL/9gA0//YARP/sAEb/7ABH/+wASP/sAEr/9gBQ//YAUf/2AFL/7ABT//YAVP/sAFX/9gBW//YAWP/2AIL/2ACD/9gAhP/YAIX/2ACG/9gAh//YAIn/9gCU//YAlf/2AJb/9gCX//YAmP/2AJr/9gCi/+wAo//sAKT/7ACl/+wApv/sAKf/7ACo/+wAqf/sAKr/7ACr/+wArP/sAK3/7AC0/+wAtf/sALb/7AC3/+wAuP/sALr/7AC7//YAv
P/2AL3/9gC+//YAwv/YAMP/7ADE/9gAxf/sAMb/2ADH/+wAyP/2AMn/7ADK//YAy//sAMz/9gDN/+wAzv/2AM//7ADR/+wA0//sANX/7ADX/+wA2f/sANv/7ADd/+wA3v/2AN//9gDg//YA4f/2AOL/9gDj//YA5P/2AOX/9gD6//YBBv/2AQj/9gEN//YBDv/2AQ//7AEQ//YBEf/sARL/9gET/+wBFP/2ARX/7AEX//YBGf/2AR3/9gEh//YBK//2AS3/9gEv//YBMf/2ATP/9gE1//YBQ//YAUT/7AFG/+wBR//2AUj/7AFK//YCCP/OAgz/zgJU//YCVf/YAlb/7AJc//YCXf/sAl//9gMa/9gDG//sAxz/2AMd/+wDHv/YAx//7AMg/9gDIv/YAyP/7AMk/9gDJf/sAyb/2AMn/+wDKP/YAyn/7AMq/9gDK//sAyz/2AMt/+wDLv/YAy//7AMw/9gDMf/sAzP/7AM1/+wDN//sAzn/7AM9/+wDP//sA0H/7ANG//YDR//sA0j/9gNJ/+wDSv/2A0v/7ANM//YDTv/2A0//7ANQ//YDUf/sA1L/9gNT/+wDVP/2A1X/7ANW//YDV//sA1j/9gNZ/+wDWv/2A1v/7ANc//YDXf/sA1//9gNh//YDY//2A2X/9gNn//YDaf/2A2v/9gATADf/2AEk/9gBJv/YAXH/2AGd/9gBpv/YAbz/2AHE/9gB3P/sAeT/7AKm/9gCp//sArL/2AKz/+wCuv/YArv/7AMU/9gDFf/sA37/2ADjACT/ugA3ABQAOQAUADoAFAA8AAoARP/YAEb/xABH/8QASP/EAEr/4gBQ/+IAUf/iAFL/xABT/+IAVP/EAFX/4gBW/+IAWP/iAIL/ugCD/7oAhP+6AIX/ugCG/7oAh/+6AJ8ACgCi/8QAo//YAKT/2ACl/9gApv/YAKf/2ACo/9gAqf/EAKr/xACr/8QArP/EAK3/xAC0/8QAtf/EALb/xAC3/8QAuP/EALr/xAC7/+IAvP/iAL3/4gC+/+IAwv+6AMP/2ADE/7oAxf/YAMb/ugDH/9gAyf/EAMv/xADN/8QAz//EANH/xADT/8QA1f/EANf/xADZ/8QA2//EAN3/xADf/+IA4f/iAOP/4gDl/+IA+v/iAQb/4gEI/+IBDf/iAQ//xAER/8QBE//EARX/xAEX/+IBGf/iAR3/4gEh/+IBJAAUASYAFAEr/+IBLf/iAS//4gEx/+IBM//iATX/4gE2ABQBOAAKAToACgFD/7oBRP/YAUb/2AFI/8QBSv/iAVb/ugFf/7oBYv+6AWn/ugF5/9gBev/sAXv/7AF+/9gBgf/iAYL/7AGD/+wBhP/sAYf/7AGJ/+wBjP/YAY7/4gGP/9gBkP/YAZP/2AGZ/9gBpP/EAar/ugGu/8QBtf/EAcr/7AHO/7oBz//EAdX/ugHY/8QB2//EAd7/xAHq/8QB7f/EAe7/4gHy/7oB+gAUAfwAFAH+ABQCAAAKAlT/4gJV/7oCVv/YAl3/xAJf/+ICZ//EAm//ugJw/7oCev/2Anz/xAKC/8QChP/EAob/xAKK/8QCr//EArH/xALL/8QCzP+6Atb/ugLX/+wC2P+6Atn/7ALa/7oC2//sAt3/xALf/+wC4f/sAu3/xALv/8QC8f/EAwb/ugMH/8QDCP+6Awn/xAMO/8QDD/+6AxP/xAMX/8QDGP/EAxn/ugMa/7oDG//YAxz/ugMd/9gDHv+6Ax//2AMg/7oDIv+6AyP/2AMk/7oDJf/YAyb/ugMn/9gDKP+6Ayn/2AMq/7oDK//YAyz/ugMt/9gDLv+6Ay//2AMw/7oDMf/YAzP/xAM1/8QDN//EAzn/xAM9/8QDP//EA0H/xANH/8QDSf/EA0v/xANP/8QDUf/EA1P/xANV/8QDV//EA1n/xANb/8QDXf/EA1//4gNh/+IDY//iA2X/4gNn/+IDaf/iA2v/4gNsAAoDbgAKA3AACgN+ABQAhwAm/84AKv/OADL/zgA0/84AN/+6ADj/7AA5/8QAOv/EADz/xACJ/84AlP/OAJX/zgCW/84Al//OAJj/zgCa/84Am//sAJz/7ACd/+wAnv/sAJ//xADI/84Ayv/OAMz/zgDO/84A3v/OAOD/zgDi/84A5P/OAQ7/zgEQ/84BEv/OART/zgEk/7oBJv+6ASr/7AEs/+wBLv/sATD/7AEy/+wBNP/sATb/xAE4/8QBOv/EAUf/zgFm/9gBbf/YAXH/ugFy/8QBc//OAXX/xAF4/8QBhf/sAZ3/ugGf/84Bpv+6Abj/zgG7/84BvP+6Ab7/2AHB/7ABxP+6Adz/zgHh/8QB5P/OAfr/xAH8/8QB/v/EAgD/xAJR/8QCXP/OAl7/7AJp/84Cef+wAnv/zgJ9/8QCf//EAoH/zgKD/84Chf/OAof/zgKJ/84Cpv+6Aqf/zgKu/84CsP/OArL/ugKz/84CtP/EArb/xAK6/7oCu//OArz/sAK9/8QCvv+wAr//xALC/8QCxP/EAtH/sALS/8QC7P/OAu7/zgLw/84C+v+wAvv/xAMK/8QDC//OAwz/xAMN/84DEv/OAxT/ugMV/84DRv/OA0j/zgNK/84DTP/OA07/zgNQ/84DUv/OA1T/zgNW/84DWP/OA1r/zgNc/84DXv/sA2D/7ANi/+wDZP/sA2b/7ANo/+wDav/sA2z/xANu/8QDcP/EA37/ugAEAXH/7AFy//YBeP/2AlH/9gAEAA//4gAR/+ICCP/iAgz/4gAlAA//xAAR/8QBVv/EAV//xAFi/8QBZv/sAWn/xAFt/+wBc//iAXb/9gF5/84Bev/YAXv/4gF8/+IBff/iAX7/zgGB/+IBgv/YAYT/4gGG/+IBh//iAYn/4gGM/84Bjv/OAY//zgGQ/84Bkv/iAZP/zgGV/+IBlv/iAZj/4gGZ/84Bmv/iAZv/4gII/8QCDP/EAiH/9gA1AAX/ugAK/7oBz//sAdj/7AHb/+wB3P/OAd3/4gHe/+wB4f/iAeT/zgHq/+wB7f/sAfb/4gIH/7oCC/+6Amf/7AJq/+wCev/2Anz/7AJ+/+wCgP/sAoL/7AKE/+wChv/sAoj/7AKK/+wCp//OAq//7AKx/+wCs//OArX/7AK3/+wCu//OAr3/4gK//+ICw//sAsX/7ALS/+IC3f/sAu3/7ALv/+wC8f/sAvX/4gL3/+IC+f/iAvv/4gMH/+wDCf/sAwv/xAMN/8QDE//sAxX/zgMX/+wAAgMK//YDDP/2ABgAD/+6ABH/ugGk/+IBqv/YAa7/4gG1/+IBzv/sAdX/7AHy/+wCCP+6Agz/ugJv/9gCcP/sAsv/4gLM/+wC1v/YAtj/2ALa/9gDBv/YAwj/2AMO/+IDD//sAxj/4gMZ/+wAHgAF/9gACv/YAZ3/7AGm/+wBvP/YAcH/2AHE/+wB3P/sAeT/7AIH/9gCC//YAnn/2AJ9/+ICf//iAqb/7AKn/+wCsv/YArP/7AK0/+ICtv/iArr/7AK7/+wCvP/YAr7/2ALR/9gC+v/YAwr/zgMM/84DFP/s
AxX/7AAnAAX/xAAK/8QB0P/sAdz/zgHd/+IB3//sAeH/2AHk/84B9v/iAgf/xAIL/8QCav/sAn7/7AKA/+wCiP/sAp3/7AKn/84Cs//OArX/4gK3/+ICuf/sArv/zgK9/9gCv//YAsP/7ALF/+wCyP/sAtL/2ALj/+wC5//sAvX/4gL3/+IC+f/iAvv/2AMD/+wDBf/sAwv/zgMN/84DFf/OACEAD/9+ABH/fgGk/8QBqv/OAa7/xAGw/+wBtf/EAb//7AHO/84B1f/OAfL/zgII/34CDP9+Am//zgJw/84Cc//2Apz/7AK4/+wCx//sAsv/xALM/84C1v/OAtj/zgLa/84C4v/sAwL/7AME/+wDBv/YAwj/2AMO/8QDD//OAxj/xAMZ/84AJQAF/+IACv/iAZ3/4gGm/+IBvP/iAcH/2AHE/+IB3P/sAeH/7AHk/+wCB//iAgv/4gJ5/9gCff/iAn//4gKm/+ICp//sArL/4gKz/+wCtP/sArb/7AK6/+ICu//sArz/2AK9/+wCvv/YAr//7ALR/9gC0v/sAvr/2AL7/+wDCv/sAwv/4gMM/+wDDf/iAxT/4gMV/+wABgAF/+IACv/iAgf/4gIL/+IDC//sAw3/7AApAAX/sAAK/7ABnf/OAab/zgG8/6YBwf/EAcT/zgHc/9gB4f/sAeT/2AIH/7ACC/+wAnn/xAJ9/7oCf/+6Aqb/zgKn/9gCsv+mArP/2AK0/84Ctv/OArr/zgK7/9gCvP/EAr3/7AK+/8QCv//sAsL/4gLD/+wCxP/iAsX/7ALR/8QC0v/sAvr/xAL7/+wDCv+mAwv/2AMM/6YDDf/YAxT/zgMV/9gAGQAF/7oACv+6Adz/zgHh/+wB5P/OAgf/ugIL/7oCav/sAn7/7AKA/+wCiP/sAqf/zgKz/84Ctf/sArf/7AK7/84Cvf/sAr//7ALD/+wCxf/sAtL/7AL7/+wDC/+6Aw3/ugMV/84ADgGd/+wBpv/sAbz/4gHE/+wCff/2An//9gKm/+wCsv/iArT/9gK2//YCuv/sAwr/7AMM/+wDFP/sABwBn//sAbj/7AG7/+wBvv/sAeH/7AJp/+wCe//sAoH/7AKD/+wChf/sAof/7AKJ/+wCrv/sArD/7AK9/+wCv//sAsL/7ALE/+wC0v/sAuz/7ALu/+wC8P/sAvv/7AMG/+wDCP/sAwv/7AMN/+wDEv/sAIMAD//EABH/xAGf/+wBpP/YAar/xAGu/9gBtf/YAbj/7AG7/+wBvv/iAcr/2AHM/+IBzf/iAc7/zgHP/84B0v/iAdP/4gHU/+IB1f/OAdb/4gHX/+IB2P/OAdn/4gHa/+IB2//OAd7/zgHg/+IB4f/YAeL/4gHj/+IB5f/iAeb/4gHo/+IB6f/sAer/zgHrABQB7P/iAe3/zgHu/9gB8v/OAfP/4gH0ABQB9f/iAff/4gH5/+ICCP/EAgz/xAJn/84CaP/iAmn/7AJu/+ICb//EAnD/zgJy/+ICdP/sAnb/4gJ6/+wCe//sAnz/zgKB/+wCgv/OAoP/7AKE/84Chf/sAob/zgKH/+wCif/sAor/zgKT/+IClQAUApf/4gKb/+ICof/iAqP/4gKlABQCqf/iAqv/4gKt/+ICrv/sAq//zgKw/+wCsf/OAr3/2AK//9gCwf/iAsP/2ALF/9gCyv/iAsv/2ALM/84Czv/iAtD/4gLS/9gC1P/iAtb/xALX/9gC2P/EAtn/2ALa/8QC2//YAt3/zgLe//YC3//YAuD/9gLh/9gC6f/iAuv/4gLs/+wC7f/OAu7/7ALv/84C8P/sAvH/zgL7/9gC/f/iAv//4gMG/9gDB//OAwj/2AMJ/84DC//sAw3/7AMO/9gDD//OAxH/4gMS/+wDE//OAxb/9gMX/84DGP/YAxn/zgALAA//2AAR/9gBzv/2AdX/9gHy//YCCP/YAgz/2AJw//YCzP/2Aw//9gMZ//YAKgAF/+IACv/iAZ3/7AGm/+wBvP/EAcH/2AHE/+wB3P/sAd3/9gHh//YB5P/sAfb/9gIH/+ICC//iAnn/2AJ9/+ICf//iAqb/7AKn/+wCsv/EArP/7AK0/84Ctv/OArr/7AK7/+wCvP/YAr3/9gK+/9gCv//2AtH/2ALS//YC9f/2Avf/9gL5//YC+v/YAvv/9gMK/9gDC//sAwz/2AMN/+wDFP/sAxX/7AATAAX/zgAK/84B3P/sAd3/7AHk/+wB9v/sAgf/zgIL/84Cp//sArP/7AK1/+wCt//sArv/7AL1/+wC9//sAvn/7AML/9gDDf/YAxX/7AAIAbz/7AJ9//YCf//2ArL/7AK0//YCtv/2Awr/9gMM//YAKQAF/+IACv/iAbz/7AHB/9gB3P/iAeH/7AHk/+ICB//iAgv/4gJq//YCef/YAn3/7AJ+//YCf//sAoD/9gKI//YCp//iArL/7AKz/+ICtP/sArX/9gK2/+wCt//2Arv/4gK8/9gCvf/sAr7/2AK//+wCwv/iAsP/7ALE/+ICxf/sAtH/2ALS/+wC+v/YAvv/7AMK/+wDC//iAwz/7AMN/+IDFf/iAC8ABf+6AAr/ugGd/84Bpv/OAbz/ugG+/+wBwf/OAcT/zgHc/+wB4f/sAeT/7AIH/7oCC/+6Amv/7AJ5/84Cff/YAn//2AKU/+wCmP/sAqT/7AKm/84Cp//sArL/ugKz/+wCtP/EArb/xAK6/84Cu//sArz/zgK9/+wCvv/OAr//7ALC/84CxP/OAtH/zgLS/+wC3v/sAuD/7AL6/84C+//sAwD/7AMK/7oDC//sAwz/ugMN/+wDFP/OAxX/7AAdAc//7AHY/+wB2//sAd7/7AHh/+wB6v/sAe3/7AJn/+wCfP/sAoL/7AKE/+wChv/sAor/7AKv/+wCsf/sAr3/7AK//+wCw//sAsX/7ALS/+wC3f/sAu3/7ALv/+wC8f/sAvv/7AMH/+wDCf/sAxP/7AMX/+wACgAP/9gAEf/YAgj/2AIM/9gCff/2An//9gK0//YCtv/2Awr/7AMM/+wAAQHpABQABgAF//YACv/2Agf/9gIL//YDC//sAw3/7AA9AA//2AAR/9gBnf/2AaT/7AGm//YBqP/sAar/7AGu/+wBsP/sAbH/9gG1/+wBvP/iAb3/7AG//+wBwf/sAcT/9gHH//YBzv/2AdX/9gHy//YCCP/YAgz/2AJv/+wCcP/2Anf/9gJ5/+wCff/2An//9gKc/+wCnv/2Aqb/9gKy/+ICtP/2Arb/9gK4/+wCuv/2Arz/7AK+/+wCx//sAsv/7ALM//YC0f/sAtb/7ALY/+wC2v/sAuL/7ALk//YC8v/2AvT/7AL2/+wC+P/sAvr/7AMC/+wDBP/sAwr/7AMM/+wDDv/sAw//9gMU//YDGP/sAxn/9gAeAAX/9gAK//YB0P/sAdz/9gHd//YB3//sAeH/9gHk//YB9v/2Agf/9gIL//YCnf/sAqf/9gKz//YCuf/sArv/9gK9//YCv//2Asj/7ALS//YC4//sAvX/9gL3//YC+f/2Avv/9gMD/+w
DBf/sAwv/9gMN//YDFf/2AE4AD//EABH/xAGf//YBpP/OAar/ugGu/84Btf/OAbj/9gG7//YBvv/iAcn/9gHO/9gBz//sAdX/2AHY/+wB2//sAd7/7AHh/+wB6v/sAesAMgHt/+wB7v/2AfL/2AH0ADICCP/EAgz/xAJn/+wCaf/2Am//ugJw/9gCe//2Anz/7AKB//YCgv/sAoP/9gKE/+wChf/2Aob/7AKH//YCif/2Aor/7AKVADICpQAyAq7/9gKv/+wCsP/2ArH/7AK9/+wCv//sAsL/7ALD/+ICxP/sAsX/4gLL/84CzP/YAtL/7ALW/7oC2P+6Atr/ugLd/+wC7P/2Au3/7ALu//YC7//sAvD/9gLx/+wC+//sAwb/ugMH/+wDCP+6Awn/7AMO/84DD//YAxL/9gMT/+wDF//sAxj/zgMZ/9gACwAP/9gAEf/YAc7/7AHV/+wB8v/sAgj/2AIM/9gCcP/sAsz/7AMP/+wDGf/sAJ0AD//EABD/2AAR/8QBn//sAaT/zgGq/7oBrv/OAbX/zgG4/+wBu//sAbwAFAG+/9gBzP/OAc3/zgHO/8QBz/+6AdD/7AHR/+wB0v/OAdP/zgHU/84B1f/EAdb/zgHX/84B2P+6Adn/zgHa/84B2/+6Adz/2AHd/9gB3v+6Ad//7AHg/84B4f/OAeL/zgHj/84B5P/YAeX/zgHm/84B5//sAej/zgHp/+IB6v+6Aez/zgHt/7oB7v/EAfL/xAHz/84B9f/OAfb/2AH3/84B+f/OAgL/2AID/9gCBP/YAgj/xAIM/8QCZ/+6Amj/zgJp/+wCav/sAm7/zgJv/7oCcP/EAnL/zgJ0/84Cdv/OAnr/zgJ7/+wCfP+6An7/7AKA/+wCgf/sAoL/ugKD/+wChP+6AoX/7AKG/7oCh//sAoj/7AKJ/+wCiv+6ApP/zgKX/84Cm//OAp3/7AKf/+wCof/OAqP/zgKn/9gCqf/OAqv/zgKt/84Crv/sAq//ugKw/+wCsf+6ArIAFAKz/9gCtf/YArf/2AK5/+wCu//YAr3/zgK//84Cwf/OAsL/zgLD/7oCxP/OAsX/ugLI/+wCyv/OAsv/zgLM/8QCzv/OAtD/zgLS/84C1P/OAtb/ugLY/7oC2v+6At3/ugLj/+wC5f/sAuf/4gLp/84C6//OAuz/7ALt/7oC7v/sAu//ugLw/+wC8f+6AvP/7AL1/9gC9//YAvn/2AL7/84C/f/OAv//zgMD/+wDBf/sAwb/ugMH/7oDCP+6Awn/ugML/84DDf/OAw7/zgMP/8QDEf/OAxL/7AMT/7oDFf/YAxf/ugMY/84DGf/EACUAD//OABD/7AAR/84Bzv/iAc//9gHV/+IB2P/2Adv/9gHe//YB6v/2Ae3/9gHy/+ICAv/sAgP/7AIE/+wCCP/OAgz/zgJn//YCcP/iAnz/9gKC//YChP/2Aob/9gKK//YCr//2ArH/9gLM/+IC3f/2Au3/9gLv//YC8f/2Awf/9gMJ//YDD//iAxP/9gMX//YDGf/iAJQAD//OABD/7AAR/84BnQAUAZ//7AGk/9gBpgAUAar/xAGu/9gBtf/YAbj/7AG7/+wBvAAUAb7/4gHEABQBzP/iAc3/4gHO/84Bz//YAdD/7AHR/+wB0v/iAdP/4gHU/+IB1f/OAdb/4gHX/+IB2P/YAdn/4gHa/+IB2//YAd7/2AHf/+wB4P/iAeH/zgHi/+IB4//iAeX/4gHm/+IB5//sAej/4gHq/9gB6wAUAez/4gHt/9gB7v/iAfL/zgHz/+IB9AAUAfX/4gH3/+IB+f/iAgL/7AID/+wCBP/sAgj/zgIM/84CZ//YAmj/4gJp/+wCbv/iAm//xAJw/84Ccv/iAnT/7AJ2/+ICev/iAnv/7AJ8/9gCgf/sAoL/2AKD/+wChP/YAoX/7AKG/9gCh//sAon/7AKK/9gCk//iApUAFAKX/+ICm//iAp3/7AKf/+wCof/iAqP/4gKlABQCpgAUAqn/4gKr/+ICrf/iAq7/7AKv/9gCsP/sArH/2AKyABQCuf/sAroAFAK9/84Cv//OAsH/4gLC/+wCw//iAsT/7ALF/+ICyP/sAsr/4gLL/9gCzP/OAs7/4gLQ/+IC0v/OAtT/4gLW/8QC2P/EAtr/xALd/9gC4//sAuX/7ALp/+IC6//iAuz/7ALt/9gC7v/sAu//2ALw/+wC8f/YAvP/7AL7/84C/f/iAv//4gMD/+wDBf/sAwb/zgMH/9gDCP/OAwn/2AML/+wDDf/sAw7/2AMP/84DEf/iAxL/7AMT/9gDFAAUAxf/2AMY/9gDGf/OACEAD//iABH/4gHO/+IBz//sAdX/4gHY/+wB2//sAd7/7AHq/+wB7f/sAfL/4gII/+ICDP/iAmf/7AJw/+ICfP/sAoL/7AKE/+wChv/sAor/7AKv/+wCsf/sAsz/4gLd/+wC7f/sAu//7ALx/+wDB//sAwn/7AMP/+IDE//sAxf/7AMZ/+IAJQGf/+wBowBuAbj/7AG7/+wBvv/iAdz/7AHh/9gB5P/sAmn/7AJ4AB4Cev/2Anv/7AKB/+wCg//sAoX/7AKH/+wCif/sAqf/7AKu/+wCsP/sArP/7AK7/+wCvf/YAr//2ALC/+ICw//sAsT/4gLF/+wC0v/YAuz/7ALu/+wC8P/sAvv/2AML/+wDDf/sAxL/7AMV/+wAIwGf/+wBuP/sAbv/7AG+/+wBwf/sAeH/7AJp/+wCef/sAnv/7AKB/+wCg//sAoX/7AKH/+wCif/sAq7/7AKw/+wCvP/sAr3/7AK+/+wCv//sAsL/zgLE/84C0f/sAtL/7ALs/+wC7v/sAvD/7AL6/+wC+//sAwb/7AMI/+wDC//sAw3/7AMS/+wDFv/2AB0Bz//2Adj/9gHb//YB3v/2AeH/9gHq//YB7f/2Amf/9gJ8//YCgv/2AoT/9gKG//YCiv/2Aq//9gKx//YCvf/2Ar//9gLS//YC3f/2Au3/9gLv//YC8f/2Avv/9gMH//YDCf/2Awv/7AMN/+wDE//2Axf/9gBAAAX/zgAK/84Bnf/YAab/2AGo/+IBqv/iAbD/4gG8/7oBvf/iAb//4gHB/+IBxP/YAdD/7AHc/+IB3//sAeH/7AHk/+ICB//OAgv/zgJv/+ICc//sAnn/4gJ9/+ICf//iApz/4gKd/+wCpv/YAqf/4gKy/7oCs//iArT/4gK2/+ICuP/iArn/7AK6/9gCu//iArz/4gK9/+wCvv/iAr//7ALH/+ICyP/sAtH/4gLS/+wC1v/iAtj/4gLa/+IC4v/iAuP/7AL0/+IC9v/iAvj/4gL6/+IC+//sAwL/4gMD/+wDBP/iAwX/7AMK/+wDC//sAwz/7AMN/+wDFP/YAxX/4gAXAA//2AAR/9gBqv/2AbD/7AG8/+wBv//sAgj/2AIM/9gCb//2An3/9gJ///YCnP/sArL/7AK0//YCtv/2Arj/7ALH/+wC1v/2Atj/9gLa//YC4v/sAwL/7AME/+wAEwAF/+wACv/sAdD/9gHd//YB3/
/2Afb/9gIH/+wCC//sAp3/9gK5//YCyP/2AuP/9gL1//YC9//2Avn/9gMD//YDBf/2Awv/7AMN/+wAKwAF/9gACv/YAZ3/4gGm/+IBqv/sAbD/7AG8/+IBv//sAcH/7AHE/+IB3P/sAeT/7AIH/9gCC//YAm//7AJ5/+wCff/sAn//7AKc/+wCpv/iAqf/7AKy/+ICs//sArT/7AK2/+wCuP/sArr/4gK7/+wCvP/sAr7/7ALH/+wC0f/sAtb/7ALY/+wC2v/sAuL/7AL6/+wDAv/sAwT/7AMK/+wDDP/sAxT/4gMV/+wAHgAF/84ACv/OAdD/7AHc/+IB3f/sAd//7AHh/+wB5P/iAfb/7AIH/84CC//OAp3/7AKn/+ICs//iArn/7AK7/+ICvf/sAr//7ALI/+wC0v/sAuP/7AL1/+wC9//sAvn/7AL7/+wDA//sAwX/7AML/84DDf/OAxX/4gAFAeH/7AK9/+wCv//sAtL/7AL7/+wABAGjAG4C5wAUAwv/7AMN/+wAPAAF/7oACv+6ACb/7AAq/+wALQCCADL/7AA0/+wAN/+6ADn/2AA6/9gAPP/EAIn/7ACU/+wAlf/sAJb/7ACX/+wAmP/sAJr/7ACf/8QAyP/sAMr/7ADM/+wAzv/sAN7/7ADg/+wA4v/sAOT/7AEO/+wBEP/sARL/7AEU/+wBJP+6ASb/ugE2/9gBOP/EATr/xAFH/+wB+v/YAfz/2AH+/9gCAP/EAgf/ugIL/7oCXP/sA0b/7ANI/+wDSv/sA0z/7ANO/+wDUP/sA1L/7ANU/+wDVv/sA1j/7ANa/+wDXP/sA2z/xANu/8QDcP/EA37/ugAEAAX/9gAK//YCB//2Agv/9gABAC0APAARAAX/9gAK//YAWf/sAFr/7ABb/+wAXP/sAF3/9gC//+wBN//sATz/9gE+//YBQP/2Afv/7AH9/+wCB//2Agv/9gNt/+wANAAP/9gAEf/YACT/7AA3/+IAOf/2ADr/9gA7/+wAPP/2AD3/9gCC/+wAg//sAIT/7ACF/+wAhv/sAIf/7ACf//YAwv/sAMT/7ADG/+wBJP/iASb/4gE2//YBOP/2ATr/9gE7//YBPf/2AT//9gFD/+wBoP/2Afr/9gH8//YB/v/2AgD/9gII/9gCDP/YAlX/7AMa/+wDHP/sAx7/7AMg/+wDIv/sAyT/7AMm/+wDKP/sAyr/7AMs/+wDLv/sAzD/7ANs//YDbv/2A3D/9gN+/+IAEgBJACgAVwAoAFkAMgBaADIAWwAyAFwAMgC/ADIBJQAoAScAKAE3ADIB+wAyAf0AMgI0ACgCNQAoAloAKAJbACgDbQAyA38AKAAcAA//7AAR/+wAJP/2AIL/9gCD//YAhP/2AIX/9gCG//YAh//2AML/9gDE//YAxv/2AUP/9gII/+wCDP/sAlX/9gMa//YDHP/2Ax7/9gMg//YDIv/2AyT/9gMm//YDKP/2Ayr/9gMs//YDLv/2AzD/9gASAEkAMgBXADIAWQAyAFoAMgBbADIAXAAyAL8AMgElADIBJwAyATcAMgH7ADIB/QAyAjQAMgI1ADICWgAyAlsAMgNtADIDfwAyALkAD//EABH/xAAiABQAJP/EACb/7AAq/+wAMv/sADT/7ABE/84ARv/OAEf/zgBI/84ASv/sAFD/4gBR/+IAUv/OAFP/4gBU/84AVf/iAFb/2ABY/+IAXf/sAIL/xACD/8QAhP/EAIX/xACG/8QAh//EAIn/7ACU/+wAlf/sAJb/7ACX/+wAmP/sAJr/7ACi/84Ao//OAKT/zgCl/84Apv/OAKf/zgCo/84Aqf/OAKr/zgCr/84ArP/OAK3/zgC0/84Atf/OALb/zgC3/84AuP/OALr/zgC7/+IAvP/iAL3/4gC+/+IAwv/EAMP/zgDE/8QAxf/OAMb/xADH/84AyP/sAMn/zgDK/+wAy//OAMz/7ADN/84Azv/sAM//zgDR/84A0//OANX/zgDX/84A2f/OANv/zgDd/84A3v/sAN//7ADg/+wA4f/sAOL/7ADj/+wA5P/sAOX/7AD6/+IBBv/iAQj/4gEN/+IBDv/sAQ//zgEQ/+wBEf/OARL/7AET/84BFP/sARX/zgEX/+IBGf/iAR3/2AEh/9gBK//iAS3/4gEv/+IBMf/iATP/4gE1/+IBPP/sAT7/7AFA/+wBQ//EAUT/zgFG/84BR//sAUj/zgFK/9gCCP/EAgz/xAJU/+ICVf/EAlb/zgJc/+wCXf/OAl//4gMa/8QDG//OAxz/xAMd/84DHv/EAx//zgMg/8QDIv/EAyP/zgMk/8QDJf/OAyb/xAMn/84DKP/EAyn/zgMq/8QDK//OAyz/xAMt/84DLv/EAy//zgMw/8QDMf/OAzP/zgM1/84DN//OAzn/zgM9/84DP//OA0H/zgNG/+wDR//OA0j/7ANJ/84DSv/sA0v/zgNM/+wDTv/sA0//zgNQ/+wDUf/OA1L/7ANT/84DVP/sA1X/zgNW/+wDV//OA1j/7ANZ/84DWv/sA1v/zgNc/+wDXf/OA1//4gNh/+IDY//iA2X/4gNn/+IDaf/iA2v/4gAJAAUAKAAKACgAD//YABH/2AAiABQCBwAoAgj/2AILACgCDP/YAMoAD//EABD/2AAR/8QAIgAUACT/ugAm/+wAKv/sADL/7AA0/+wANwAUAET/sABG/7oAR/+6AEj/ugBK/7oAUP/OAFH/zgBS/7oAU//OAFT/ugBV/84AVv/EAFj/zgBZ/+wAWv/sAFv/7ABc/+wAXf/YAIL/ugCD/7oAhP+6AIX/ugCG/7oAh/+6AIn/7ACU/+wAlf/sAJb/7ACX/+wAmP/sAJr/7ACi/7oAo/+wAKT/sACl/7AApv+wAKf/sACo/7AAqf+6AKr/ugCr/7oArP+6AK3/ugC0/7oAtf+6ALb/ugC3/7oAuP+6ALr/ugC7/84AvP/OAL3/zgC+/84Av//sAML/ugDD/7AAxP+6AMX/sADG/7oAx/+wAMj/7ADJ/7oAyv/sAMv/ugDM/+wAzf+6AM7/7ADP/7oA0f+6ANP/ugDV/7oA1/+6ANn/ugDb/7oA3f+6AN7/7ADf/7oA4P/sAOH/ugDi/+wA4/+6AOT/7ADl/7oA+v/OAQb/zgEI/84BDf/OAQ7/7AEP/7oBEP/sARH/ugES/+wBE/+6ART/7AEV/7oBF//OARn/zgEd/8QBIf/EASQAFAEmABQBK//OAS3/zgEv/84BMf/OATP/zgE1/84BN//sATz/2AE+/9gBQP/YAUP/ugFE/7ABRv+wAUf/7AFI/7oBSv/EAfv/7AH9/+wCAv/YAgP/2AIE/9gCCP/EAgz/xAJU/84CVf+6Alb/sAJc/+wCXf+6Al//zgMa/7oDG/+wAxz/ugMd/7ADHv+6Ax//sAMg/7oDIv+6AyP/sAMk/7oDJf+wAyb/ugMn/7ADKP+6Ayn/sAMq/7oDK/+wAyz/ugMt/7ADLv+6Ay//sAMw/7oDMf+wAzP/ugM1/7oDN/+6Azn/ugM9/7oDP/+6A0H/ugNG/
+wDR/+6A0j/7ANJ/7oDSv/sA0v/ugNM/+wDTv/sA0//ugNQ/+wDUf+6A1L/7ANT/7oDVP/sA1X/ugNW/+wDV/+6A1j/7ANZ/7oDWv/sA1v/ugNc/+wDXf+6A1//zgNh/84DY//OA2X/zgNn/84Daf/OA2v/zgNt/+wDfgAUAAQABQAUAAoAFAIHABQCCwAUABEAD//YABH/2AFW/+wBX//sAWL/7AFk//YBaf/sAXD/9gFx/+IBcv/2AXT/7AF1//YBeP/2AYj/9gII/9gCDP/YAlH/9gAEAAAAAQAIAAEADABAAAEBFgIIAAIACAluCW8AAAmwCbMAAgm6CboABgm8Cb0ABwoPCg8ACQsICxMACgsgCyAAFgvjC+cAFwABAGkJdAl2CXcJeAl5CXwJggmGCYgJjAmNCY4JjwmTCZgJoAmhCb4JwgnDCcQJxgnHCdwJ5QnnCegJ6QnqCe0J8wn0CfcJ+Qn8Cf0KAQpXClsKXQphCmIKYwpkCmgKbApzCnsKfwqBCoUKhgqHCogKjAqQCpcK+wr8Cv0K/gr/CwALAQsCCwMLBAs1CzgLOQs6CzsLPQs/C0ALQQtDC0cLSAtJC0oLSwtMC04LXAtdC14LXwtiC2MLZAtlC2YLZwtoC7oLuwu8C70Lvgu/C8ALwQvhC+IAHAAAAMoAAAByAAAA4gAAANoAAADiAAAA6gAAAHoAAAB6AAAAegAAAIIAAADKAAAAwgAAAIoAAACiAAAAkgAAAJoAAADKAAAA2gAAAKIAAACqAAAAsgAAALoAAADCAAAAygAAANIAAADaAAAA4gAAAOoAAv5JBPoADAAC/kkE+gAEAAL+SQT6ABEAAv5JBPoAKQAC/kkE+gAjAAL+SQT6AC8AAv5JBPoAJAAC/kkE+gAnAAL+SQT6ACYAAv5JBPoAMgAC/kkE+gAdAAL+SQT6ABoAAv5JBPoAJQAC/kkE+gAYAAL+SQT6AA4AAv5JBPoAGwBpAOwA9AD8AQQBDAEUANQBNAI8AkQCTAJUATwBdADcAVQBVADUAlQBPADcARwBJADkAOwA9AD8AQQBDAEUARwBJAE0AjwCRAJMAXQBLAE0AjwCRAJMAlQBPAFEAUwBVAEsATQCPAJEAkwCVAE8AUQBTAFUAVwBZAF0AXQBfAFcAWQBbAF0AXwBhAGMAZQBnAGkAawBtAG8AcQBzAHUAdwB5AHsAfQB/AJ0AgQCBAIMAhQCHAIkAmwCbAJsAmwCLAI0AjwCRAJMAlQCXAJkAmwCdAJ8AAIDDQT6AC8AAgMKBPoAKwACA7kE+gAxAAICVgT6ADMAAgMFBPoAJAACAwUE+gA1AAIDGAT6AD4AAgPOBPoAOgACA0EE+gAsAAIDGAT6AE8AAgPOBPoATQACAy0E+gAvAAIDkQT6ADgAAgJ4BPoAKwACAjAE+gArAAIDCgT6ACkAAgQJBPoANgACAcsE+gAsAAIBywT6ADgAAgIwBPoATgACAjAE+gAqAAICMAT6ADoAAgQlBPoAKwACAj0E+gAzAAICPQT6AEkAAgI9BPoAOQACAj0E+gBPAAICPgT6AD8AAgO5BPoAUAACA7kE+gBOAAIDuQT6AGYAAgJ4BPoATwACA/oE+gBOAAICMAT6ADcAAgNRBPoAPwACBKoE+gBOAAIDPQT6ADsAAgSfBPoAVgACA1ME+gAuAAIEcwT6AEYAAgPnBPoAQQACBLQE+gBLAAIESwT6AEkAAgcrBPoATwACA5EE+gA3AAIDwgT6AEEAAgI9BPoAHAACAj4E+gAiAAIDuQT6ACwAAgJ4BPoAKgACBAkE+gA0AAIDzgT6AC4AAgJ9BPoAOwACBOME+gBOAAQAAAABAAgAAQAMAEYAAQCiAkAAAgAJCawJrwAACbgJuAAECcgJyQAFC6oLuQAHC8sLzAAXC84LzwAZC9EL0gAbC9QL2AAdC9oL4AAiAAEALAmCCYYJiAmMCY0JjgmPCZMJmAmdCZ4JoAmhCb4JxAncCgEKVwpoCmwKewqMCpALNQtHC0gLSQtKC0sLTAtOC1oLXAtkC2gLugu7C7wLvQu+C78LwAvBC+IAKQAAAW4AAAFuAAABdgAAAKYAAAGWAAAArgAAALYAAAC+AAAAxgAAAM4AAADWAAAA3gAAAOYAAADuAAAA9gAAAP4AAAEGAAABDgAAARYAAAEeAAABJgAAAS4AAAFuAAABNgAAATYAAAE+AAABPgAAAUYAAAFGAAABTgAAAVYAAAFeAAABZgAAAWYAAAFuAAABbgAAAXYAAAF+AAABhgAAAY4AAAGWAAL+SQAAACEAAv5JAAAANAAC/kkAAAA9AAL+SQAAACUAAv2zAAAAJQAC/kkAAAAZAAL+SQAAACQAAv2zAAAAJAAC/kkAAAAdAAL9swAAAB0AAv5JAAAALQAC/bMAAAAtAAL9swAAADQAAv5JAAAAQAAC/bMAAABAAAL+SQAAAEkAAv2zAAAAPQAC/bMAAABJAAL+FwAAAB8AAv4XAAAAIAAC/hcAAAAZAAL+FwAAACoAAv4XAAAAOwAC/hcAAABEAAL+FwAAABUAAv5JAAAAGAAC/kkAAAARAAL+SQAAAB8AAv3RAAAANwAC/dEAAAA9AAL+SQAAAAwALAB6AFoBCgESARoBIgBiAIoAogBqAGoAcgByAHoAogCCAIoAkgCaAKIAkgCaAKIAqgCyALoAwgDKANIA2gDiAOoA8gE6APoBAgEKARIBGgEiASoBMgE6AUIAAgLrAAAAOQACAngAAAAsAAIB3QAAABsAAgQJAAAANwACAw0AAAAuAAIC6wAAADIAAgM7AAAAKwACAy0AAAACAAIDOwAAACwAAgMKAAAAAgACBCUAAAAYAAIFFQAAAE8AAgNnAAAAOAACBGsAAABAAAIFxAAAAE8AAgRXAAAAPAACBbkAAABXAAIDVAAAADwAAgKBAAAAKQACAoEAAAAvAAIHKwAAAFAAAgLrAAAAOAACA0UAAABCAAICPQAAAB0AAgI+AAAAIwACAusAAAAtAAICeAAAACsAAgQJAAAANQACA84AAAAvAAIF5wAAAE8AAgAIAAEACAABADgABAAAABcAagCgALYAxADOAPwBYgFoAYoBzAHeAgACKgI4AmYCbAKuArQCvgLoAyYDPANKAAEAFwoRChIKFAoWChgKGQoaCiAKJAolCiYKJwooCikKKgosCi0KLgowCjEKMgozC0YADQmC/5wJg/+cCYn/iAmM/5wJkP+ICZH/iAmT/5wJl/+ICZj/iAmc/4gJn/+cCaL/nAml/5wABQmD/4gJkf9WCZv/kgmc/4gJov+IAAMJlf/YCZv/2Amc/9gAAgmV/8QJnP/iAAsJgv+4CYr/fgmM/6YJjv9+CZH/nAmT/5wJlf/sCZn/uAmb/7AJnP/ECaL/uAAZCYL/nAmD/4gJhP9+CYX/TAmH/8QJiP9MCYn/dAmM/4gJjf+ICY//iAmQ/2oJkf+ICZP/iAmV/2oJl/9qCZj/agmZ/5wJm/+SCZz/iAmf/34JoP+cCaL/nAmk/2oJpf+ICab/agABCYn/
6wAICYL/TAmD/2oJl/+ICZj/iAmc/7AJn/9MCaL/TAml/2oAEAmC/4gJiP+ICYz/kgmR/4gJk/+ICZf/fgmY/34JnP9WCaL/iAml/5IJpv90Cmj/iAr9/4gK/v+IC0v+1AtM/84ABAmX/9gJmP/YCaT/2Amm/+wACAmJ/4gJjP+cCZH/iAmX/4gJmP+ICZz/iAmf/5wJpf+cAAoJif/YCYr/2AmR/5IJk/+mCZn/pgmb/4gJnP/OCZ//pgmi/6YJpf/sAAMJnP+cCZ//sAmi/6YACwmR/4gJk/+SCZX/kgmX/34Jmf+mCZv/YAmc/34Jn/+cCaL/pgml/34Jpv9qAAEJnP/OABAJgv90CYn/agmM/4gJjf+ICY7/pgmP/4gJkf+SCZP/iAmX/2oJmP9qCZn/dAmb/9gJnP+6CZ//dAmi/3QJpv9gAAEJnP84AAIJn/+mCaL/pgAKCYL/ugmM/5wJjf+cCZD/dAmX/3QJmP90CZv/ugmc/5IJov/ECaT/dAAPCYL/pgmD/1YJif90CYz/fgmR/34Jk/9+CZX/agmX/4gJmP+ICZn/pgmb/0wJnP9qCZ//nAmi/6YJpf9WAAUJkP/ECZv/kgmc/8QJn/+wCaL/pgADCZL/ugmU/7oJmv+mAAEJov9WAAgAAAABAAgAAgFeABABIgJ4AAIAAAEqAAIALQluCW8AAgmmCaYAAQmsCa8AAQmwCbMAAgm4CbgAAQnICckAAQoPCg8AAgoQChAAAQpbClsAAQpdCl0AAQphCmQAAQpzCnMAAQp4CngAAQp/Cn8AAQqBCoEAAQqFCogAAQqXCpcAAQrtCu0AAQrvCu8AAQrxCvcAAQr5CvkAAQr9Cv8AAQsCCwQAAQsICxMAAgsgCyAAAgs4CzgAAQs6CzoAAQs9Cz0AAQs/C0AAAQtDC0MAAQtHC04AAQtUC1QAAQtWC1YAAQtbC1sAAQtdC2MAAQtmC2cAAQuqC7AAAQuzC7MAAQu3C7cAAQvLC8sAAQvOC84AAQvRC9EAAQvUC9cAAQvaC9wAAQvgC+AAAQABCbsAAQABAAIABgAUAAEAAQABAAAAAQAAABAAAgACAAEAAQAAAAEAAAAQAAEAAAABAAgAAQAIAAL+IAABAAEJuwAIAAAAAQAIAAIBfgAQAP4BDAACAAABFAACACcJbglvAAEJcQlxAAEJdQl1AAEJegl7AAEJfQmBAAEJggmmAAMJqgmqAAQJqwmrAAIJsAm3AAEJvgnFAAMJ2AnYAAEJ4gniAAEJ5gnmAAEJ6wnsAAEJ7gnyAAEJ9Qn1AAEJ9goOAAMKDwoPAAEKVwqcAAMLBQs0AAELNQs1AAMLOAs4AAMLOgs6AAMLPAs9AAMLPwtAAAMLQwtDAAMLRQtFAAMLRwtMAAMLTgtQAAMLUgtUAAMLVgtWAAMLWQtnAAMLaQtpAAMLawtrAAMLbgt4AAQLnQudAAULngugAAILoQupAAEL4wvvAAEAAQm6AAQAAQAAAAEAAQABCWsAAQABAAQACgAYACYANgABAAEAAQAAAAEAAAASAAEAAgABAAAAAQAAABMAAgADAAQAAQAAAAEAAAATAAEABQABAAAAAQAAABIAAQAAAAEACAABAB4AAgH0AAEAAAABAAgAAgAOAAIAAwFAAMgAyAABAAMJugm8Cb0AAAABAAAACgDaAdYABWN5cmwAIGRldjIALGRldmEAdGdyZWsAuGxhdG4AxAAEAAAAAP//AAEAAAAKAAFNQVIgACgAAP//AAwADQAEABEAEAAGAAoACAAOAAMABwAPAAsAAP//AA0ADAANAAQAEQAQAAYACgAIAA4AAwAHAA8ACwAKAAFNQVIgACYAAP//AAsADQAEABEABQAJABIADgADAAcADwALAAD//wAMAAwADQAEABEABQAJABIADgADAAcADwALAAQAAAAA//8AAQABAAQAAAAA//8AAQACABNjY21wAHRjY21wAHRjY21wAHRhYnZzAH5ha2huAJBibHdmAJZibHdmAJxibHdzAKJjamN0ALBoYWxmALZoYWxmALxoYWxuAMRsb2NsAMpudWt0ANBwcmVzANZwc3RzAN5ya3JmAORycGhmAOx2YXR1APIAAAADAAAAAQACAAAABwAWABgAGQAaABwAUgBXAAAAAQAGAAAAAQAJAAAAAQARAAAABQBZAF4AXwBkAGUAAAABABMAAAABAAwAAAACAA0ADgAAAAEAbQAAAAEABAAAAAEABQAAAAIAFAAVAAAAAQBpAAAAAgAIAAoAAAABAAcAAAADAA8AEAARAG4A3gZaBtgIfAiaCLQLlgvIC+IP5BAEEDYQVhE+FMoXrBtGHrAe+B8UIEIiHiJmIsojkCO4JiwmYCZ0MEwzUDaYOS47cD2mP/pCBkQ2RmBIeEqcTMZO6lEUUxRU8FdkWThbHlzIXmBf5mEMYqBjcGQuZPJlpGZiZxRnzGiQabxqtGtebDpsrG0YbaJt6m5YckRyUnJgcm5yfHKKcphypnK0csJy0HLkc8Bz3nPsc/p0EnWUdah3HHc2eAx4LnnWe8Z9xn3agBiAYIB6gj6CcoKEgp6CuIQmhD6EVoR8AAQAAAABAAgAAQVqAAUAEAEiAjQDRgRYABwAOgBCAEoAUgBaAGIAagByAHoAggCIAJAAmACgAKgAsAC2AL4AxgDOANYA3gDkAOwA9AD8AQQBDAiPAAMEygTLCJAAAwTKBMwIkQADBMoEzQiSAAMEygTOCJMAAwTLBMoIlAADBMsEywiVAAMEywTMCJYAAwTLBM0IlwADBMsEzgiYAAIEywiZAAMEzATKCJoAAwTMBMsImwADBMwEzAicAAMEzATNCJ0AAwTMBM4IngACBMwInwADBM0EygigAAMEzQTLCKEAAwTNBMwIogADBM0EzQijAAMEzQTOCKQAAgTNCKUAAwTOBMoIpgADBM4EywinAAMEzgTMCKgAAwTOBM0IqQADBM4EzgiqAAIEzgAcADoAQgBKAFIAWgBiAGgAcAB4AIAAiACQAJgAoACoALAAtgC+AMYAzgDWAN4A5ADsAPQA/AEEAQwIqwADBMoEygisAAMEygTLCK0AAwTKBMwIrgADBMoEzQivAAMEygTOCLAAAgTKCLEAAwTLBMoIsgADBMsEzAizAAMEywTNCLQAAwTLBM4ItQADBMwEygi2AAMEzATLCLcAAwTMBMwIuAADBMwEzQi5AAMEzATOCLoAAgTMCLsAAwTNBMoIvAADBM0Eywi9AAMEzQTMCL4AAwTNBM0IvwADBM0EzgjAAAIEzQjBAAMEzgTKCMIAAwTOBMsIwwADBM4EzAjEAAMEzgTNCMUAAwTOBM4IxgACBM4AHAA6AEIASgBSAFoAYgBoAHAAeACAAIgAkACWAJ4ApgCuALYAvgDGAM4A1gDeAOQA7AD0APwBBAEMCMcAAwTKBMoIyAADBMoEywjJAAMEygTMCMoAAwTKBM0IywADBMoEzgjMAAIEygjNAAMEywTKCM4AAwTLBMsIzwADBMsEzAjQAAMEywTNCNEAAwTLBM4I0gA
CBMsI0wADBMwEygjUAAMEzATLCNUAAwTMBM0I1gADBMwEzgjXAAMEzQTKCNgAAwTNBMsI2QADBM0EzAjaAAMEzQTNCNsAAwTNBM4I3AACBM0I3QADBM4EygjeAAMEzgTLCN8AAwTOBMwI4AADBM4EzQjhAAMEzgTOCOIAAgTOABwAOgBCAEoAUgBaAGIAaABwAHgAgACIAJAAlgCeAKYArgC2AL4AxADMANQA3ADkAOoA8gD6AQIBCgjjAAMEygTKCOQAAwTKBMsI5QADBMoEzAjmAAMEygTNCOcAAwTKBM4I6AACBMoI6QADBMsEygjqAAMEywTLCOsAAwTLBMwI7AADBMsEzQjtAAMEywTOCO4AAgTLCO8AAwTMBMoI8AADBMwEywjxAAMEzATMCPIAAwTMBM0I8wADBMwEzgj0AAIEzAj1AAMEzQTKCPYAAwTNBMsI9wADBM0EzAj4AAMEzQTOCP4AAgTOCPkAAwTOBMoI+gADBM4Eywj7AAMEzgTMCPwAAwTOBM0I/QADBM4EzgAcADoAQgBKAFIAWgBiAGgAcAB4AIAAiACQAJYAngCmAK4AtgC+AMQAzADUANwA5ADsAPIA+gECAQoI/wADBMoEygkAAAMEygTLCQEAAwTKBMwJAgADBMoEzQkDAAMEygTOCQQAAgTKCQUAAwTLBMoJBgADBMsEywkHAAMEywTMCQgAAwTLBM0JCQADBMsEzgkKAAIEywkLAAMEzATKCQwAAwTMBMsJDQADBMwEzAkOAAMEzATNCQ8AAwTMBM4JEAACBMwJEQADBM0EygkSAAMEzQTLCRMAAwTNBMwJFAADBM0EzQkVAAMEzQTOCRYAAgTNCRcAAwTOBMoJGAADBM4EywkZAAMEzgTMCRoAAwTOBM0AAgABBMoEzgAAAAYAAAABAAgAAwAAAAECLgABABIAAQAAAAMAAgAQAmACYgAAAowCjwADA3MDcwAHBOQE9AAIBPoE+gAZBRwFHgAaBSEFIwAdBSUFJQAgBSkFKwAhBS8FMQAkBTYFNgAnBToFOgAoBUIFTgApBkgGSQA2BksGUQA4BlMGUwA/AAQAAAABAAgAAQGSAAMADABuAQAACAASABwAJgAwADoARABOAFgJGwAEBOUE8wJgCRwABATlBPMCYQkdAAQE5QT0AmAJHgAEBOUE9AJhCR8ABATnBPMCYAkgAAQE5wTzAmEJIQAEBOcE9AJgCSIABATnBPQCYQAMABoAJAAuADgAQgBMAFYAYABqAHQAfgCICSMABATlBPMCYAkkAAQE5QTzAmEJJQAEBOUE9AJgCSYABATlBPQCYQknAAQE5wTzAmAJKAAEBOcE8wJhCSkABATnBPQCYAkqAAQE5wT0AmEJMwAEBOkE5QJgCTQABATpBOUCYQk1AAQE6QTnAmAJNgAEBOkE5wJhAAwAGgAkAC4AOABCAEwAVgBgAGoAdAB+AIgJKwAEBOUE8wJgCSwABATlBPMCYQktAAQE5QT0AmAJLgAEBOUE9AJhCS8ABATnBPMCYAkwAAQE5wTzAmEJMQAEBOcE9AJgCTIABATnBPQCYQk3AAQE6QTlAmAJOAAEBOkE5QJhCTkABATpBOcCYAk6AAQE6QTnAmEAAQADAX4BhgGSAAEAAAABAAgAAgAMAAMA8wI3BgMAAQADAEwATQRXAAEAAAABAAgAAgAKAAILZAtpAAEAAgmfCaMABAAAAAEACAABAqYAOAB2AIAAigCUAJ4AqACyALwAxgDQANoA5ADuAPgBAgEMARYBIAEqATQBPgFIAVIBXAFmAXABegGEAY4BmAGiAawBtgHAAcoB1AHeAegB8gH8AgYCEAIaAiQCLgI4AkICTAJWAmACagJ0An4CiAKSApwAAQAECeIAAgmnAAEABAnjAAIJpwABAAQJ5AACCacAAQAECeUAAgmnAAEABAnmAAIJpwABAAQJ5wACCacAAQAECegAAgmnAAEABAnpAAIJpwABAAQJ6gACCacAAQAECesAAgmnAAEABAnsAAIJpwABAAQJ7QACCacAAQAECe4AAgmnAAEABAnvAAIJpwABAAQJ8AACCacAAQAECfEAAgmnAAEABAnyAAIJpwABAAQJvgACCacAAQAECb8AAgmnAAEABAnAAAIJpwABAAQJ9gACCacAAQAECfcAAgmnAAEABAn4AAIJpwABAAQJ+QACCacAAQAECcEAAgmnAAEABAn6AAIJpwABAAQJ+wACCacAAQAECfwAAgmnAAEABAn9AAIJpwABAAQJwgACCacAAQAECcMAAgmnAAEABAn+AAIJpwABAAQJ/wACCacAAQAECgAAAgmnAAEABAoBAAIJpwABAAQKAgACCacAAQAECZYAAgmnAAEABAoDAAIJpwABAAQJxAACCacAAQAECgQAAgmnAAEABAoFAAIJpwABAAQKBgACCacAAQAECcUAAgmnAAEABAmeAAIJpwABAAQKBwACCacAAQAECaEAAgmnAAEABAoIAAIJpwABAAQKCQACCacAAQAECgoAAgmnAAEABAoLAAIJpwABAAQKDAACCacAAQAECfMAAgmnAAEABAn0AAIJpwABAAQJ9QACCacAAQAEC2UAAgmnAAEABAtrAAIJpwACAAgJcQmVAAAJlwmdACUJnwmgACwJogmmAC4JxgnHADMJ2AnYADULZAtkADYLaQtpADcABAAAAAEACAABACIAAgAKABYAAQAECg0AAwm4CaQAAQAECg4AAwm4CYsAAQACCYIJiQAEAAAAAQAIAAEELgABAAgAAQAECg8AAgm4AAQAAAABAAgAAQ80AEoAmgCmALIAvgDKANYA4gDuAPoBBgESAR4BKgE2AUIBTgFaAWYBcgF+AYoBlgGiAa4BugHGAdIB3gHqAfYCAgIOAhoD4gImAjICPgJKAlYCYgJuAnoChgKSAp4CqgK2AsICzgLaAuYC8gL+AwoDFgMiAy4DOgNGA1IDXgNqA3YDggPuA44DmgOmA7IDvgPKA9YD4gPuAAEABApXAAMJuAmdAAEABApYAAMJuAmdAAEABApZAAMJuAmdAAEABApaAAMJuAmdAAEABApbAAMJuAmdAAEABApcAAMJuAmdAAEABApdAAMJuAmdAAEABApeAAMJuAmdAAEABApfAAMJuAmdAAEABApgAAMJuAmdAAEABAphAAMJuAmdAAEABApiAAMJuAmdAAEABApjAAMJuAmdAAEABApkAAMJuAmdAAEABAplAAMJuAmdAAEABApmAAMJuAmdAAEABApnAAMJuAmdAAEABApoAAMJuAmdAAEABAppAAMJuAmdAAEABApqAAMJuAmdAAEABAqOAAMJuAmdAAEABAprAAMJuAmdAAEABApsAAMJuAmdAAEABAptAAMJuAmdAAEABApuAAMJuAmdAAEABApvAAMJuAmdAAEABApwAAMJuAmdAAEABApxAAMJuAmdAAEABAqVAAMJuAmdAAEABApyAAMJuAmdAAEABApzAAMJuAmdAAEABAqXAAMJuAmdAAEABAp0AAMJuAmdAAEABAp2AA
MJuAmdAAEABAp3AAMJuAmdAAEABAp4AAMJuAmdAAEABAp7AAMJuAmdAAEABAp8AAMJuAmdAAEABAp9AAMJuAmdAAEABAqCAAMJuAmdAAEABAqHAAMJuAmdAAEABAqIAAMJuAmdAAEABAqQAAMJuAmdAAEABAqUAAMJuAmdAAEABAp+AAMJuAmdAAEABAp/AAMJuAmdAAEABAqAAAMJuAmdAAEABAqBAAMJuAmdAAEABAqDAAMJuAmdAAEABAqEAAMJuAmdAAEABAqFAAMJuAmdAAEABAqGAAMJuAmdAAEABAqJAAMJuAmdAAEABAqKAAMJuAmdAAEABAqLAAMJuAmdAAEABAqMAAMJuAmdAAEABAqNAAMJuAmdAAEABAqPAAMJuAmdAAEABAqRAAMJuAmdAAEABAqSAAMJuAmdAAEABAqTAAMJuAmdAAEABAqWAAMJuAmdAAEABAqYAAMJuAmdAAEABAqaAAMJuAmdAAEABAqbAAMJuAmdAAEABAqcAAMJuAmdAAEABAp5AAMJuAmdAAEABAp6AAMJuAmdAAEABAtmAAMJuAmdAAEABAtnAAMJuAmdAAEABAp1AAMJuAmdAAEABAqZAAMJuAmdAAQAAAABAAgAAQASAAEACAABAAQKEAACCbgAAQABCZ0ABQAAAAEACAABAA4ABAAaABoAGgAaAAEABAnZCdoJ3AndAAEABAADAAEJuAmdAAEACwAEAAAAAQAIAAEAEgABAAgAAQAEChAAAgmdAAEAAQm4AAQAAAABAAgAAQrAAEoBggGMAZYBoACaAbYBwAHKAdQB3gCkAK4AuADCAhgCIgIsAMwCQgJMAlYCYAJqAnQCfgKIApICnAKoBEQCsgK8AsYC0ALaAuQC7gL4AwIDDAMWdIx0lgM4A0IDTHSgA2IDbAN2A4B0qnS0A6IDrAO2ANYDzAPWA+AD6gP0BE4D/gQIBBIEHAQmBDAEOgREBE4EWARiAAEABAoVAAIJuAABAAQKGwACCbgAAQAEChwAAgm4AAEABAodAAIJuAABAAQKHgACCbgAAQAECiIAAgm4AAEABApGAAIJuAAEAAAAAQAIAAEJ2ABKAJoApACuALgAwgDOANgA4gDsAPYBAAEMARgBJAEwAToBRAFOAVoBZAFuAXgBggGMAZYBoAGqAbQBwANcAcoB1AHeAegB8gH8AgYCEAIaAiQCLgI4AkQCUAJaAmQCbgJ6AoQCjgKYAqICrgK6AsQCzgLYAuQC7gL4AwIDDANmAxYDIAMqAzQDPgNIA1IDXANmA3ADegABAAQKEQACCbgAAQAEChIAAgm4AAEABAoTAAIJuAABAAQKFAACCbgAAQAEChUAAwm4CeAAAQAEChYAAgm4AAEABAoXAAIJuAABAAQKGAACCbgAAQAEChkAAgm4AAEABAoaAAIJuAABAAQKGwADCbgJ4AABAAQKHAADCbgJ4AABAAQKHQADCbgJ4AABAAQKHgADCbgJ4AABAAQKHwACCbgAAQAECiAAAgm4AAEABAohAAIJuAABAAQKIgADCbgJ4AABAAQKIwACCbgAAQAECiQAAgm4AAEABApIAAIJuAABAAQKJQACCbgAAQAECiYAAgm4AAEABAonAAIJuAABAAQKKAACCbgAAQAECikAAgm4AAEABAoqAAIJuAABAAQKKwADCbgJ4AABAAQKKwACCbgAAQAECi0AAgm4AAEABApRAAIJuAABAAQKLgACCbgAAQAECi8AAgm4AAEABAowAAIJuAABAAQKMQACCbgAAQAECjIAAgm4AAEABAo1AAIJuAABAAQKNgACCbgAAQAECjcAAgm4AAEABAo8AAIJuAABAAQKQQADCbgJ4AABAAQKQgADCbgJ4AABAAQKSgACCbgAAQAECk4AAgm4AAEABAo4AAIJuAABAAQKOQADCbgJ4AABAAQKOgACCbgAAQAECjsAAgm4AAEABAo9AAIJuAABAAQKPgACCbgAAQAECj8AAwm4CeAAAQAECkAAAwm4CeAAAQAECkMAAgm4AAEABApEAAIJuAABAAQKRQACCbgAAQAECkYAAwm4CeAAAQAECkcAAgm4AAEABApJAAIJuAABAAQKSwACCbgAAQAECkwAAgm4AAEABApNAAIJuAABAAQKUgACCbgAAQAEClMAAgm4AAEABApUAAIJuAABAAQKVQACCbgAAQAEClYAAgm4AAEABAozAAIJuAABAAQKNAACCbgAAQAECiwAAgm4AAEABApQAAIJuAABAAQLagACCbgAAQAEC2wAAgm4AAQAAAABAAgAAQKgADoAegCEAI4AmACiAKwAtgDAAMoA1ADeAOgA8gD8AQYBEAEaASQBLgE4AlACjAFCAUwBVgFgAWoBdAF+AYgBkgGcAaYBsAG6AcQBzgHYAeIB7AH2AgACCgIUAh4CKAIyAjwCRgJQApYCWgJkAm4CeAKCAowClgABAAQKnQACCbgAAQAECp4AAgm4AAEABAqfAAIJuAABAAQKoAACCbgAAQAECqMAAgm4AAEABAqkAAIJuAABAAQKpQACCbgAAQAECqYAAgm4AAEABAqnAAIJuAABAAQKsAACCbgAAQAECrEAAgm4AAEABAqyAAIJuAABAAQKtAACCbgAAQAECrUAAgm4AAEABAq2AAIJuAABAAQKtwACCbgAAQAECrgAAgm4AAEABAq5AAIJuAABAAQKugACCbgAAQAECrsAAgm4AAEABAq+AAIJuAABAAQKvwACCbgAAQAECsAAAgm4AAEABArBAAIJuAABAAQKwgACCbgAAQAECsQAAgm4AAEABArFAAIJuAABAAQKxgACCbgAAQAECscAAgm4AAEABArIAAIJuAABAAQKyQACCbgAAQAECswAAgm4AAEABArNAAIJuAABAAQKzgACCbgAAQAECs8AAgm4AAEABArQAAIJuAABAAQK2QACCbgAAQAECtoAAgm4AAEABArbAAIJuAABAAQK3QACCbgAAQAECt4AAgm4AAEABArfAAIJuAABAAQK4AACCbgAAQAECuEAAgm4AAEABAriAAIJuAABAAQK4wACCbgAAQAECuQAAgm4AAEABAq8AAIJuAABAAQK5wACCbgAAQAECugAAgm4AAEABArpAAIJuAABAAQK6gACCbgAAQAECusAAgm4AAEABAq9AAIJuAABAAQK5gACCbgAAgAJClcKWgAAClwKYAAECmUKZwAJCmkKdwAMCnkKfgAbCoAKhAAhCokKiwAmCo0KmwApC2YLZwA4AAQAAAABAAgAAQNqAEoAmgCkAK4AuADCAMwA1gDgAOoA9AD+AQgBEgEcASYBMAE6AUQBTgFYAWIBbAF2AYABigGUAZ4BqAGyAbwBxgHQAdoDVgHkAe4B+AICAgwCFgIgAioCNAI+AkgCUgJcAmYCcAJ6AoQCjgKYAqICrAK2AsACygLUAt4C6ALyAvwDBgNgAxADGgMkAy4DOANCA0wDVgNgAAEABApXAAIKEAABAAQKWAACChAAAQAEClkAAgoQAAEABApaAAIKEAABAAQKWwACChAAA
[… base64-encoded TrueType font data truncated: this span embeds the NotoSans-Bold.ttf binary consumed by the jsPDF addFont("NotoSans-Bold.ttf","NotoSans","bold") call that closes the string on the following line …]
AMgsCAAMJpwmsCwMAAwmnCa0LBAADCacJrgr9AAIJrAr+AAIJrQr/AAIJrgACAAYADAr7AAIJrAr8AAIJrQACAAYADAsAAAIJrAsBAAIJrQAEAAoAEAAWABwK7QACCawK7wACCa0K8QACCa4K8gACCa8ACwAYAB4AJAAqADAANgA8AEIASgBSAFoLqgACCawLrQACCa0LrwACCa4LsQACCa8LuQACCbgLtAACCcgLtgACCckLzQADC50JrAvQAAMLnQmtC9MAAwudCa4L2QADC50JuAADAAgADgAUCwIAAgmsCwMAAgmtCwQAAgmuAAQACgAQABYAHArzAAIJrAr0AAIJrQr1AAIJrgr2AAIJrwAHABAAFgAcACIAKAAuADQLywACCawLzgACCa0L0QACCa4L1AACCa8L1wACCbgL1QACCcgL1gACCckAAgAGAAwK9wACCawK+QACCa0ABAAKABAAFgAcC8wAAgmsC88AAgmtC9IAAgmuC9gAAgm4AAEACgmTCZ0JngmmCacKAQoMChAKeAudAAUAAAABAAgAAQBQAAIACgAKAAIABgAUAAIAAgmsAAAAZgABAGgAAgACCa0AAABnAAEAaAABAAAAAQAIAAIAHAACCvsLAAABAAAAAQAIAAIACgACCvwLAQABAAIKcQqVAAEAAAABAAgAAgAKAAIKEAoQAAEAAgmsCa0ABgAAAAEACAACAa4AEAEWASwAAgAAATQAAgArCYIJggADCYYJhgACCYgJiAABCYwJjAABCY0JjQACCY8JjwABCZMJkwABCZgJmAADCaAJoQABCb4JvgADCcMJwwABCcQJxAADCfcJ9wACCfkJ+QABCfwJ/AABCf0J/QACCgEKAQABClcKVwADClsKWwACCl0KXQABCmEKYgABCmQKZAABCmgKaAABCmwKbAADCnMKcwABCnsKewADCn8KfwACCoEKgQABCoUKhQABCoYKhgACCogKiAABCowKjAABCpAKkAADCpcKlwABCzULNQADCzgLOAABCzoLOgABCz0LPQACC0MLQwABC0cLTAABC04LTgABC14LXwABC2ILZwABAAIAAwmrCasAAQsFCwcAAQvsC+wAAQABCWsAAQABAAMACAAWACQAAQABAAEAAAABAAAAagABAAIAAQAAAAEAAABrAAEAAwABAAAAAQAAAGwAAQAAAAEACAACAEAABQueC6ELpAunC+0AAQAAAAEACAACACgABQufC6ILpQuoC+4AAQAAAAEACAACABAABQugC6MLpgupC+8AAQAFCasLBQsGCwcL7AAEAAAAAQAIAAEA3gASACoANAA+AEgAUgBcAGYAcAB6AIQAjgCYAKIArAC2AMAAygDUAAEABArKAAIL2QABAAQK0QACC9kAAQAECtMAAgvZAAEABArVAAIL2QABAAQK1wACC9kAAQAECkYAAgu5AAEABApBAAIJuAABAAQKQgACCbgAAQAECjkAAgm4AAEABAo/AAIJuAABAAQKQAACCbgAAQAECsMAAgm4AAEABArsAAIJuAABAAQKoQACC9cAAQAECqgAAgvXAAEABAqqAAIL1wABAAQKrAACC9cAAQAECq4AAgvXAAEAEgmGCYwJjQmOCY8JkwnCCcMJ9wn8Cf0KeAqcC7oLvAu9C74Lvw=="),this.addFont("NotoSans-Bold.ttf","NotoSans","bold")}])};var Se=t(90631),Pe=t(99094),Fe=t(82326);function Ye(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Ue(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Ye(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Ye(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Ne="NotoSans",je="Helvetica",ze=["H1","H2","H3","H4","H5","H6"],He=/^((?!chrome|android).)*safari/i.test(navigator.userAgent),Oe=96/72,Le=e=>e/Oe,Ge=e=>e*Oe,Je=function(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",t=arguments.length>2?arguments[2]:void 0,{pageHeight:o,pageWidth:i,lineHeight:a=15,pageMarginHorizontal:s=0,pageMarginVertical:r=0,marginVertical:A=[0,0],marginHorizontal:c=[0,0],headerHeight:l=0,footerHeight:d=0}=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{};t+=A[0];const u=n.replace((0,Se.A)(),"").replace(/[\u2192\u2190]/g,"-").replace(/[\u2191\u2193]/g,"|").trim();return e.splitTextToSize(u,i-2*s-c[1]).forEach((n=>{t>o-r-d&&(e.addPage(),t=l+r+a),e.text(n,s+c[0],t),t+=a})),t+A[1]},qe=function(e,n,t){var o;let{simpleTable:i,fontFamily:a,textColor:s,pageMarginHorizontal:r}=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{};const A=null===n||void 0===n?void 0:n.querySelector("table"),c=null===A||void 0===A?void 0:A.id,l=null===(o=A.querySelector("caption"))||void 0===o?void 0:o.textContent;if(!c)return 
t;try{return(0,xe.cs)(e,{startY:i?t+20:t,html:'[data-report-table-id="'.concat(c,'"]'),didDrawPage:()=>{l&&(e.setFont(a,"bold"),e.setFontSize(12),e.setTextColor(s),e.text(l,r,t+10))},headStyles:{fillColor:"#00AB44",textColor:"#fff",fontSize:8},bodyStyles:{fontSize:8}}),e.lastAutoTable.finalY+30}catch(d){return he.wd("Error while rendering report table: ".concat("string"===typeof d?d:"unknown")),t}},Ke=function(e,n,t){let o=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{};if(!n)return t;const i=(e=>{const n=[],t=e=>!!e&&["DIV","P","UL","OL","ARTICLE",...ze].includes(e.tagName)&&!getComputedStyle(e).display.includes("inline"),o=function(e){let i=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[],a=arguments.length>2&&void 0!==arguments[2]?arguments[2]:0;if(e.nodeType===Node.TEXT_NODE){var s,r;const n=e.nodeValue.replace(/\s+/g," ").trim(),t=ze.includes(null===e||void 0===e||null===(s=e.parentElement)||void 0===s?void 0:s.tagName),o=null===e||void 0===e||null===(r=e.parentElement)||void 0===r||null===(r=r.classList)||void 0===r?void 0:r.contains("code-line");n&&i.push({text:n,isHeader:t,isCode:o})}else if(e.nodeType===Node.ELEMENT_NODE)if("UL"===e.tagName||"OL"===e.tagName){const t="OL"===e.tagName;let i=1;a+=1,e.childNodes.forEach((e=>{if(e.nodeType===Node.ELEMENT_NODE&&"LI"===e.tagName){const s=[{text:t?"".concat(i,". "):"\u2022 ",listLevel:a}];n.push(s),o(e,s,a),i++}}))}else{const s=Array.from(e.childNodes),r=!a&&t(e)?[]:i;for(const e of s)o(e,r,a);!a&&t(e)&&r.length>0&&n.push(r)}};return o(e),n})(n);return i.forEach((n=>{var i;const{isCode:a,isHeader:s}=n[0]||{},r=n.reduce(((e,n)=>[...e,(null===n||void 0===n?void 0:n.text)||""]),[]).join(" "),A=null===(i=n.find((e=>{let{listLevel:n}=e;return n>0})))||void 0===i?void 0:i.listLevel,c=Ue(Ue({},o),{},{marginVertical:a||A>0?[0,0]:[5,5],marginHorizontal:a?[10,0]:A>0?[10*(A-1),0]:[0,0]});1===n.length&&s?e.setFont(Ne,"bold"):a?e.setFont("Courier","normal"):e.setFont(Ne,"normal"),t=Je(e,r,t,c)})),t},Ve=async function(e,n,t){let{backgroundColor:o,pageHeight:i,pageWidth:a,pageMarginHorizontal:s=0,pageMarginVertical:r=0,marginVertical:A=[0,0],headerHeight:c=0,footerHeight:l=0}=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{};if(!n)return t;t+=A[0];const{width:d}=n.getBoundingClientRect(),u=Ge(a-2*s)/d;n.style.transformOrigin="top left",n.style.transform="scale(".concat(u,")"),await function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:1e3;return new Promise((n=>setTimeout((()=>n()),e)))}(100);const{width:h,height:m}=n.getBoundingClientRect();if(!h||!m)return t;const g=a-2*s,p=Le(m+2*r);try{const a=(await fe()(n,{useCORS:!0,foreignObjectRendering:!He,width:Ge(g),height:Ge(p),scale:1,backgroundColor:o,ignoreElements:e=>e.hasAttribute("data-noprint"),onclone:e=>{e.querySelectorAll(".dygraph-axis-label").forEach((e=>{e.style="color:#000;"}))}})).toDataURL("image/jpeg",.85);t>i-p-r-l&&(e.addPage(),t=c+r),e.rect(s,t,g,p,"F"),e.addImage(a,"JPEG",s,t,g,p),t=t+p+20}catch(f){}return t+A[1]},Xe=async function(e){let{data:n}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};ke(ye.Ay),Re(ye.Ay);const t=Ne,o="#F6F7F7",i="#526161",a=595.28,s=841.89,r=new ye.Ay({orientation:"portrait",unit:"pt",format:"a4",compress:!0});r.setFillColor(o);let A=180;for(const c of Array.from(e.querySelectorAll("[data-block-type]"))){const e=c.dataset.blockType;e!==Fe.Z.HEADER?e!==Fe.Z.TEXT?e!==Fe.Z.TABLE&&e!==Fe.Z.MARKDOWN_TABLE&&e!==Fe.Z.LOAD_SYSTEMD_JOURNAL&&e!==Fe.Z.LOAD_WINDOWS_EVENTS?e!==Fe.Z.LOAD_CHART&&e!==Fe.Z.CHART||(A=await 
Ve(r,c,A,{backgroundColor:o,pageHeight:s,pageWidth:a,pageMarginHorizontal:20,pageMarginVertical:20,marginVertical:[0,8],headerHeight:35,footerHeight:25})):A=await qe(r,c,A,{simpleTable:e===Fe.Z.TABLE,fontFamily:t,textColor:i,pageMarginHorizontal:20}):(r.setFont(t,"normal"),r.setFontSize(11),r.setTextColor(i),A=Ke(r,c,A,{pageHeight:s,pageWidth:a,lineHeight:15,pageMarginHorizontal:20,pageMarginVertical:20,headerHeight:35,footerHeight:25})):(r.setFont(t,"bold"),r.setFontSize(14),r.setTextColor(i),A=Je(r,c.innerText,A,{pageHeight:s,pageWidth:a,lineHeight:20,pageMarginHorizontal:20,pageMarginVertical:20,marginVertical:[5,5],headerHeight:35,footerHeight:25}))}return function(e){let{data:n={},fontFamily:t=je,bigHeaderHeight:o=100,pageMarginHorizontal:i,pageMarginVertical:a,headerHeight:s,footerHeight:r}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};if(!e)return;const{report:A={},headerLines:c=[]}=n,{definitionName:l}=A,d="Made with \u2665 by Netdata - ".concat(Pe.$i),u="#EDF0F0",h=e.getNumberOfPages(),m=e.internal.pageSize.getWidth(),g=e.internal.pageSize.getHeight()-r+10;for(let p=1;p<=h;p++){if(e.setPage(p),1===p){e.setFillColor(u),e.rect(0,0,m,o,"F"),e.addImage(Pe.EF,"PNG",i,5,Le(205),Le(40)),e.setFillColor("#00AB44"),e.lines([[200,0],[0,o],[-100,0]],m-200,0,[1,1],"F",!0);let n=2*a+Le(40);l&&(e.setFont(t,"bold"),e.setFontSize(10),e.text("".concat(l," Report"),i,n),n+=18),Array.isArray(c)&&c.forEach((o=>{o[0]&&o[1]&&(e.setFont(t,"normal"),e.setFontSize(10),e.text(o[0],i,n),e.setFont(t,"bold"),e.text(o[1],i+e.getTextWidth(o[0])+2,n),n+=15)}))}else e.setFillColor(u),e.rect(0,0,m,s,"F"),e.addImage(Pe.EF,"PNG",m-i-Le(102.5),(s-Le(20))/2,Le(102.5),Le(20));e.setFillColor(u),e.rect(0,g-20,m,r+10,"F"),e.setTextColor("#526161"),e.setFontSize(10),e.text(d,i,g);const n="Page ".concat(p," of ").concat(h),A=e.getTextWidth(n);e.text(n,m-i-A,g)}return e}(r,{data:n,fontFamily:t,bigHeaderHeight:150,pageMarginHorizontal:20,pageMarginVertical:20,headerHeight:35,footerHeight:25})};var We=t(79022);const Ze=["reportId"],$e=["isLoading"];function en(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function nn(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?en(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):en(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const tn=(0,M.A)(u.D),on=(0,C.A)((e=>{let{reportId:n}=e,t=(0,f.A)(e,Ze);const[o,,a]=(0,T.A)(),{loaded:s,report:r,error:A}=(0,c.ef)({id:n}),[,l]=(0,V.A)(),d=(0,de.A)({id:n}),u=(0,i.useMemo)((()=>"Netdata Insights - ".concat(d))),{lines:h}=(e=>{let{reportId:n}=e;const{loaded:t,report:o,error:i}=(0,c.ef)({id:n}),{createdAt:a,variables:s}=t&&!i&&o||{};return{lines:[["Time period:",(0,We.p_)(s)],["Created at:",(0,le.A)({reportDate:a?new Date(a):null})],["Report ID:",n],["Report name:",(0,de.A)({id:n})]]}})({reportId:n});return s&&!A?(0,m.jsx)(Qe,{isDisabled:!o,renderAccessor:e=>{let{isLoading:n}=e,o=(0,f.A)(e,$e);return(0,m.jsx)(tn,nn(nn({width:"16px",height:"16px",disabled:n},o),t))},options:{autoDownload:!0,generator:Xe,generatorProps:{data:{report:r,headerLines:h}},fileName:u,onError:l},children:(0,m.jsx)(De.A,nn(nn({},r||{}),{},{flavour:"printable",onReady:a}))}):null}));var an=t(85702);function 
sn(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function rn(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?sn(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):sn(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const An=(0,ne.A)(a.Icon),cn=e=>(0,m.jsx)(An,rn({name:"clock_hollow",width:"18px",height:"18px",color:"menuItem",cursor:"pointer",tooltip:"Schedule report",tooltipProps:{align:"bottom"},noWrapper:!0},e)),ln=e=>{let{reportId:n}=e;return(0,m.jsx)(an.A,{reportId:n,Accessor:e=>(0,m.jsx)(cn,rn({},e))})},dn="insights-report-toolbar-container",un=e=>{let{reportId:n,status:t,variables:o,isReportPage:f,isScheduledReport:y,flavour:b,closeMenu:E,associateReport:w}=e;const[,B]=(0,s.A)(),[,,M]=(0,r.A)(B),T=(0,A._F)(),{goToReport:I}=(0,d.A)(),{reset:v}=(0,l.A)(),_="inlineMenu"===b,D=(0,c.CF)({id:n,nested:!1}),{scheduledReportId:x}=D||{},k=D&&!x,R=(0,i.useCallback)((()=>{I(n)}),[n,I]),S=(0,i.useCallback)(((e,n)=>{const t=T.findIndex((n=>n.id===e));t>-1&&M(t,n)}),[T,M]),P=(0,i.useCallback)((()=>{f?S(n,!0):"function"===typeof E&&E()}),[n,f,S,E]),F=(0,i.useMemo)((()=>({feature:"Insights",uiPart:b||"toolbar",reportId:n,reportStatus:t})),[n,t,b]);return _?(0,m.jsxs)(a.Flex,{"data-testid":dn,alignItems:"center",gap:2,padding:[0,1,0,0],children:[(0,m.jsx)(g,{status:t,flavour:b,onClick:w,tooltip:"Add to conversation with AI",logPayload:rn(rn({},F),{},{description:"Add report to conversation with AI"})}),(0,m.jsx)(ce,{reportId:n,tool:h.Y.share,flavour:b,icon:"share",label:"Share",tooltip:"Share report",getToolVisibility:()=>"COMPLETED"===t,logPayload:rn(rn({},F),{},{description:"Share report"})}),(0,m.jsx)(p.A,{reportId:n,tool:h.Y.delete,status:t,flavour:b,onDelete:P,onSuccess:v,getToolVisibility:()=>y||["COMPLETED","FAILED"].includes(t),logPayload:rn(rn({},F),{},{description:"Delete report"})})]}):(0,m.jsxs)(a.Flex,{"data-testid":dn,alignItems:"center",gap:4,children:[(0,m.jsx)(g,{status:t,flavour:b,onClick:w,tooltip:"Add to conversation with AI",logPayload:rn(rn({},F),{},{description:"Add report to conversation with AI"})}),f?null:(0,m.jsx)(u.A,{tool:h.Y.openReport,status:t,flavour:b,icon:"goToNode",label:"Open report",onClick:R,tooltip:"Open report in new tab",logPayload:rn(rn({},F),{},{description:"Open report to tab"})}),(0,m.jsx)(Q,{tool:h.Y.info,reportId:n,status:t,variables:o,flavour:b,icon:"informationPress",tooltip:"See report details",logPayload:rn(rn({},F),{},{description:"Show report info"})}),(0,m.jsx)(on,{reportId:n,tool:h.Y.download,status:t,flavour:b,icon:"download",label:"Download",tooltip:"Download report",logPayload:rn(rn({},F),{},{description:"Download report"})}),(0,m.jsx)(u.A,{tool:h.Y.schedule,status:t,flavour:b,logPayload:rn(rn({},F),{},{description:"Schedule report"}),Component:()=>(0,m.jsx)(ln,{reportId:n}),getToolVisibility:e=>(0,C.m)(e)&&k}),(0,m.jsx)(ce,{reportId:n,tool:h.Y.share,status:t,flavour:b,icon:"share",label:"Share",tooltip:"Share report",logPayload:rn(rn({},F),{},{description:"Share report"})}),(0,m.jsx)(p.A,{reportId:n,tool:h.Y.delete,status:t,flavour:b,onDelete:P,onSuccess:v,logPayload:rn(rn({},F),{},{description:"Delete 
report"})}),f?null:(0,m.jsx)(u.A,{tool:h.Y.close,status:t,flavour:b,icon:"x",onClick:v,tooltip:"Close report",logPayload:rn(rn({},F),{},{description:"Close report"})})]})}},34113(e,n,t){"use strict";t.d(n,{A:()=>d});t(98992),t(54520),t(3949);var o=t(64467),i=t(80045),a=t(96540),s=t(3319),r=t(74848);const A=["logPayload","onClick"];function c(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function l(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?c(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):c(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const d=e=>n=>{let{logPayload:t,onClick:o}=n,c=(0,i.A)(n,A);const{sendLog:d}=(0,s.A)(),u=(0,a.useCallback)((e=>{"function"===typeof o&&o(e),t&&d(t)}),[t,o,d]);return(0,r.jsx)(e,l(l({},c),{},{onClick:u}))}},48329(e,n,t){"use strict";t.d(n,{A:()=>d,m:()=>l});t(98992),t(54520),t(3949);var o=t(64467),i=t(80045),a=t(96540),s=t(17762),r=t(74848);const A=["getToolVisibility"];function c(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const l=e=>{let{status:n,tool:t,visibleByStatus:o}=e;return(o[n]||[]).includes(t)},d=e=>n=>{let{getToolVisibility:t=l}=n,d=(0,i.A)(n,A);const u=(0,a.useMemo)((()=>t({status:d.status,tool:d.tool,visibleByStatus:s.o})),[t,d.status,d.tool]);return(0,r.jsx)(e,function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?c(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):c(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({toolVisible:u},d))}},803(e,n,t){"use strict";t.d(n,{w:()=>o});const o={enabled:"ENABLED",paused:"PAUSED",systemDisabled:"SYSTEM_DISABLED"}},85702(e,n,t){"use strict";t.d(n,{A:()=>ae});t(98992),t(54520),t(3949);var o=t(64467),i=t(80045),a=t(96540),s=t(42358),r=t(51510),A=t(10444),c=t(54835),l=(t(62953),t(8872),t(82505));function d(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function u(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?d(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):d(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const h=e=>{let{recipientIds:n=[],onChange:t}=e;const o=n.reduce(((e,n)=>u(u({},e),{},{[n]:!0})),{}),[i,s]=(0,a.useState)(o),r=(0,l.di)(),A=(0,a.useMemo)((()=>(e=>e.reduce(((e,n)=>(e.push({name:n.name,email:n.email,user:{avatarURL:n.avatarURL,name:n.name,email:n.email,id:n.id},type:n.role}),e)),[]))(r)),[r]),c=(0,a.useMemo)((()=>Object.entries(i).reduce(((e,n)=>{let[t,o]=n;return 
o&&e.push(t),e}),[])),[i]);return(0,a.useEffect)((()=>{t(c)}),[c,t]),{data:A,columnVisibility:{name:!1},rowSelection:i,onRowSelectionChange:s}};var m=t(6084),g=t(74848);const p=[{id:"name",accessor:"name",header:"Name",cell:e=>{let{getValue:n}=e;return(0,g.jsx)(s.TextSmall,{children:n()})}},{id:"user",accessor:"user",header:"Users",cell:e=>{let{getValue:n}=e;const{name:t,avatarURL:o,email:i}=n();return(0,g.jsxs)(s.Flex,{alignItems:"center",gap:2,children:[(0,g.jsx)(m.A,{src:o,title:"".concat(t," - ").concat(i||"email not set")}),(0,g.jsx)(s.TextSmall,{children:t})]})}},{id:"email",accessor:"email",header:"Email",width:300,align:"center",cell:e=>{let{getValue:n}=e;return(0,g.jsx)(s.TextSmall,{children:n()})}}];function f(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function y(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?f(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):f(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const b=e=>{let{recipients:n,labelProps:t={},onChange:o}=e;const{data:i,columnVisibility:a,rowSelection:r,onRowSelectionChange:A}=h({recipientIds:n,onChange:o});return(0,g.jsxs)(g.Fragment,{children:[(0,g.jsx)(s.Text,y(y({color:"menuItem"},t),{},{children:"Add recipients"})),(0,g.jsx)(s.Flex,{height:{max:60},children:(0,g.jsx)(s.Table,{enableSelection:!0,enableSorting:!0,columnVisibility:a,data:i,dataColumns:p,rowSelection:r,onRowSelectionChange:A,getRowId:e=>{let{user:n}=e;return null===n||void 0===n?void 0:n.id},onSearch:()=>{}})})]})};var E=t(6304);function w(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function B(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?w(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):w(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const C=e=>{let{recipients:n=[],labelProps:t={},onChange:o}=e;const[i,a]=(0,E.A)(!!n.length);return(0,g.jsxs)(s.Flex,{column:!0,gap:2,children:[(0,g.jsxs)(s.Flex,{alignItems:"center",justifyContent:"between",children:[(0,g.jsx)(s.Text,B(B({color:"menuItem"},t),{},{children:"Email Notifications"})),(0,g.jsx)(s.Toggle,{colored:!0,checked:i,onChange:a,toggleProps:{uncheckedColor:["neutral","grey130"],checkedColor:"primary"}})]}),i?(0,g.jsxs)(g.Fragment,{children:[(0,g.jsx)(s.Text,{color:"placeholder",children:"A notification will be sent by default to the user who created the report."}),(0,g.jsx)(b,{recipients:n,labelProps:t,onChange:o})]}):null]})};var M=t(65408);function T(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function I(e){for(var n=1;n<arguments.length;n++){var 
t=null!=arguments[n]?arguments[n]:{};n%2?T(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):T(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const v={},_={recurrenceValue:M.T6.week},Q=e=>{let{name:n,rrule:t,recipients:o,onNameChange:i,onRuleChange:a,onRecipientsChange:r,initialSchedulerState:A={}}=e;return(0,g.jsxs)(s.Flex,{width:{min:70},column:!0,gap:4,padding:[2,0],children:[(0,g.jsxs)(s.Flex,{column:!0,gap:1,children:[(0,g.jsx)(s.Text,{color:"menuItem",children:"Report name"}),(0,g.jsx)(s.TextInput,{value:n||"",size:"small",round:1,onChange:i})]}),(0,g.jsx)(c.A,{rrule:t,labelProps:v,onRuleChange:a,initialState:I(I({},_),A)}),(0,g.jsx)(C,{recipients:o,labelProps:v,onChange:r})]})};var D=t(74891),x=t(94404),k=t(60908);const R=(0,x.A)((0,D.A)(s.Button)),S=(0,r.default)(s.ModalContent).attrs((e=>{let{isMobile:n}=e;return{width:n?{base:"95vw"}:{base:130}}})).withConfig({displayName:"modal__StyledModalContent",componentId:"sc-1ktexsa-0"})([""]),P=e=>{let{isEdit:n,reportName:t,rrule:o,recipients:i,isDisabled:a,isPauseDisabled:r,showPause:c,initialSchedulerState:l,onReportNameChange:d,onReportRuleChange:u,onReportRecipentsChange:h,onSubmit:m,onPauseClick:p,onClose:f}=e;const y=(0,A.J)(),b=a?"Provide a name and a recurring rule in order to schedule":null;return(0,g.jsx)(s.Modal,{backdropProps:{backdropBlur:!0},onClickOutside:f,onEsc:f,children:(0,g.jsx)(k.Ay,{feature:"InsightsReportSchedule",section:"scheduler-modal",children:(0,g.jsxs)(S,{"data-testid":"ai-credits-modal",isMobile:y,children:[(0,g.jsxs)(s.ModalHeader,{children:[(0,g.jsxs)(s.Flex,{gap:2,alignItems:"center",children:[(0,g.jsx)(s.Icon,{name:"clock_hollow",color:"text"}),(0,g.jsx)(s.H4,{children:"Schedule Report"})]}),(0,g.jsx)(s.ModalCloseButton,{onClose:f})]}),(0,g.jsx)(s.ModalBody,{children:(0,g.jsx)(Q,{name:t,rrule:o,recipients:i,onNameChange:d,onRuleChange:u,onRecipientsChange:h,initialSchedulerState:l})}),(0,g.jsx)(s.ModalFooter,{children:(0,g.jsxs)(s.Flex,{gap:2,justifyContent:"end",padding:[1,2],children:[n||c?(0,g.jsx)(R,{label:"Pause schedule",flavour:"hollow",icon:"pauseSolid",disabled:r,feature:"InsightsReportSchedule",payload:{description:"Pause report schedule"},onClick:p}):(0,g.jsx)(s.Button,{label:"Cancel",flavour:"hollow",onClick:f}),(0,g.jsx)(R,{label:n&&!r?"Update schedule":"Schedule",disabled:a,feature:"InsightsReportSchedule",payload:{description:"Schedule report"},tooltip:b,onClick:m})]})})]})})})};t(81454),t(37550);var F=t(30569),Y=t(63872),U=t(15505),N=t(46587),j=t(24609),z=t(19186),H=t(16922),O=t(81685),L=t(58247);const G=e=>{let{id:n,onSuccess:t,onFail:o}=e;const i=(0,j.vt)(),s=(0,z.ID)();return(0,a.useCallback)((e=>{(0,L.Pt)({spaceId:i,roomId:s,reportId:n,payload:e}).then((e=>{let{data:n}=e;"function"===typeof t&&t(n)})).catch((e=>{"function"===typeof o&&o(e)}))}),[i,s,n,t,o])};var J=t(88978),q=t(25877),K=t(803);const V=["recipientIds"];function X(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function W(e){for(var n=1;n<arguments.length;n++){var 
t=null!=arguments[n]?arguments[n]:{};n%2?X(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):X(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Z={name:"",rrule:"",recipientIds:[]},$=e=>{let{reportId:n,onSubmitProvided:t,onPause:o}=e;const s=(0,N.uW)("id"),r=(0,j.vt)(),A=(0,z.ID)(),c=(0,O.CF)({id:n,nested:!1}),{query:l}=(0,O.g0)(),[d,u]=(0,a.useState)({}),[h,,m,g]=(0,E.A)(),[p,f,,y]=(0,E.A)(),b=(0,O.fT)(),w=!(null===c||void 0===c||!c.rrule),B="function"===typeof o,C=h||!d.name||!d.rrule,M="function"!==typeof o&&(h||(null===c||void 0===c?void 0:c.status)!==K.w.enabled),{reset:T}=(0,O.Xh)({id:n}),[,,I,v]=(0,O.uN)(),[_,Q]=(0,Y.A)(),D=(0,a.useCallback)((e=>{let{target:n}=e;u((e=>W(W({},e),{},{name:n.value})))}),[u]),x=(0,a.useCallback)((e=>{const n=e?e.toString():null;u((e=>W(W({},e),{},{rrule:n})))}),[u]),k=(0,a.useCallback)((e=>{u((n=>W(W({},n),{},{recipientIds:e})))}),[u]),R=(0,F.yF)((0,a.useCallback)(((e,n,t)=>{g(),y(),n((0,H.Tj)({spaceId:r,roomId:A}),(e=>W(W({},e),{},{reports:e.reports.map((e=>e.id===t.id?W(W({},e),(0,U.bn)(t)):e))}))),T(),_({header:"Success",text:"You have successfully updated scheduled report"})}),[T,g,y,_])),S=(0,a.useCallback)((e=>{g(),Q(e)}),[g,Q]),P=G({id:n,onSuccess:R,onFail:S}),L=(0,F.yF)((0,a.useCallback)(((e,n,t)=>{y(),b(),n((0,H.Tj)({spaceId:r,roomId:A}),(e=>W(W({},e),{},{reports:[...e.reports,W(W({},(0,U.bn)(t)),{},{isScheduled:!0})]})))}),[b,y])),X=(0,J.A)({onSuccess:L}),$=(0,a.useCallback)((()=>{const{recipientIds:e}=d,n=(0,i.A)(d,V),o=(0,U.Jz)((0,q.a)(d.variables));"function"===typeof t?(t(W(W({},d),{},{variables:o})),y()):w?P(W(W(W(W({},d.name!==c.name?{name:d.name}:{}),d.rrule!==c.rrule?{rrule:d.rrule}:{}),d.recipientIds.length!==c.recipientIds.length||d.recipientIds.some((e=>!c.recipientIds.includes(e)))?{recipient_ids:d.recipientIds}:{}),{},{status:K.w.enabled})):X(W(W({},n),{},{recipient_ids:e,variables:o}))}),[w,d,t,y,X,P]),ee=(0,a.useCallback)((()=>{if("function"===typeof o)o(),u(Z),y();else if(w&&null!==c&&void 0!==c&&c.status){m();const e=c.status===K.w.enabled?K.w.paused:K.w.enabled;P({status:e})}}),[w,null===c||void 0===c?void 0:c.status,o,u,y]);return(0,a.useEffect)((()=>{u((e=>{let{isEdit:n,currentUserId:t,report:o,reportQuery:i}=e;return o?W(W(W({},Z),o),n?{}:{name:""}):i?W(W(W({},Z),i),{},{recipientIds:[t].filter(Boolean)}):Z})({isEdit:w,currentUserId:s,report:c,reportQuery:l}))}),[w,s,c,l,u]),(0,a.useEffect)((()=>{p?v():I()}),[p,I,v]),{isEdit:w,isSubmitDisabled:C,isPauseDisabled:M,reportName:d.name,rrule:d.rrule,recipients:d.recipientIds,showPause:B,onReportNameChange:D,onReportRuleChange:x,onReportRecipentsChange:k,isModalOpen:p,toggleModal:f,onSubmit:$,onPauseClick:ee,closeModal:y}};var ee=t(3319);const ne=["reportId","onSubmit","onPause","Accessor","accessorProps","initialSchedulerState"];function te(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function oe(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?te(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):te(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const 
ie=(0,x.A)((0,D.A)(s.Button)),ae=e=>{let{reportId:n,onSubmit:t,onPause:o,Accessor:s,accessorProps:r={},initialSchedulerState:A={}}=e,c=(0,i.A)(e,ne);const{sendLog:l}=(0,ee.A)(),{isEdit:d,isSubmitDisabled:u,isPauseDisabled:h,reportName:m,rrule:p,recipients:f,showPause:y,onReportNameChange:b,onReportRuleChange:E,onReportRecipentsChange:w,isModalOpen:B,toggleModal:C,onSubmit:M,onPauseClick:T,closeModal:I}=$({reportId:n,onSubmitProvided:t,onPause:o}),v=(0,a.useCallback)((()=>{"function"===typeof r.onClick&&r.onClick(),C(),l(oe({feature:"InsightsReportSchedule",description:"Open scheduler modal"},r.logPayload||{}))}),[C,l,r.logDescription]);return(0,g.jsxs)(g.Fragment,{children:[s?(0,g.jsx)(s,oe({onClick:v},r)):(0,g.jsx)(ie,oe({label:"Schedule",icon:"clock_hollow",flavour:"hollow",feature:"InsightsReportSchedule",payload:{description:"Open scheduler modal"},tooltipProps:{align:"bottom"},onClick:C},c)),B?(0,g.jsx)(P,{isEdit:d,reportName:m,rrule:p,recipients:f,isDisabled:u,isPauseDisabled:h,showPause:y,initialSchedulerState:A,onReportNameChange:b,onReportRuleChange:E,onReportRecipentsChange:w,onSubmit:M,onPauseClick:T,onClose:I}):null]})}},30413(e,n,t){"use strict";t.d(n,{A:()=>ze});t(98992),t(54520),t(3949);var o=t(64467),i=t(80045),a=t(63950),s=t.n(a),r=(t(81454),t(96540)),A=t(42358),c=t(25383),l=t(24155),d=(t(62953),t(18121)),u=t(99094),h=t(74848);function m(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function g(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?m(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):m(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const p=e=>{let{id:n,name:t,onClick:o}=e;const[i,a]=(0,d.useHovered)({},[]),s=(0,r.useMemo)((()=>n===u.oG.investigation?"New ".concat(t):t),[n,t]),{icon:c,color:l,bg:m}=u.ci[n]||{},p=(0,r.useCallback)((()=>{"function"===typeof o&&o(n)}),[n,o]);return(0,h.jsx)(A.Flex,g(g({ref:i,height:"28px",alignItems:"center",justifyContent:"center",background:a?m:"panelBg",cursor:"pointer",round:!0,onClick:p},a?{border:{side:"all",color:l}}:{}),{},{children:(0,h.jsxs)(A.Flex,{alignItems:"center",gap:1,children:[(0,h.jsx)(A.Icon,{name:c,color:l}),(0,h.jsx)(A.Text,{color:l,children:s})]})}))};var f=t(51510);const y=(0,f.default)(A.Box).withConfig({displayName:"styled__MenuItemsGrid",componentId:"sc-18n69i2-0"})(["display:grid;grid-template-columns:1fr 1fr;gap:8px;"]);function b(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function E(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?b(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):b(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const w=e=>{let{label:n,icon:t,definitions:o=[],definitionIds:i=[],onItemClick:a}=e;const s=(0,r.useMemo)((()=>o.filter((e=>{let{id:n}=e;return 
i.includes(n)}))),[o,i]);return(0,h.jsxs)(A.Flex,{column:!0,gap:4,children:[(0,h.jsxs)(A.Flex,{alignItems:"center",gap:2,children:[(0,h.jsx)(A.Icon,{color:"menuItem",name:t}),(0,h.jsx)(A.Text,{strong:!0,color:"menuItem",children:n})]}),(0,h.jsx)(y,{"data-testid":"menu-items-grid",children:s.map((e=>(0,h.jsx)(p,E(E({},e),{},{onClick:a}),e.id)))})]})},B=()=>{const{loaded:e,definitions:n,error:t,onButtonClick:o}=(0,c.A)();return e?t?null:(0,h.jsx)(A.Flex,{column:!0,gap:4,padding:[0,4,4,4],children:u.I9.map((e=>(0,h.jsx)(w,E(E({definitions:n},e),{},{onItemClick:o}),e.id)))}):(0,h.jsx)(l.A,{})};var C=t(79997),M=t(81685);const T=()=>{const[e,n]=(0,M.OH)();return(0,h.jsx)(C.BN,{"data-testid":"insights-search-input-container",children:(0,h.jsx)(A.SearchInput,{value:e,placeholder:"Search insights",onChange:n,containerStyles:{width:"100%"}})})},I=["isActive","isLastItem"];function v(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function _(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?v(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):v(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Q=(0,f.default)(A.Flex).attrs((e=>{let{isActive:n,isLastItem:t}=e,o=(0,i.A)(e,I);return _(_(_({height:"42px",alignItems:"center",gap:2,padding:[2,2,2,4],cursor:"pointer"},n?{background:"highlight"}:{}),t?{}:{border:{side:"bottom",color:"border"}}),o)})).withConfig({displayName:"menuItem__MenuItemContainer",componentId:"sc-yylasy-0"})(["&:hover{background:",";}"],(0,A.getColor)("highlight")),D=e=>{let{id:n,label:t,onClick:o,isActive:i,isLastItem:a}=e;const s=(0,r.useCallback)((()=>{o({id:n})}),[n,o]);return(0,h.jsxs)(Q,{onClick:s,isActive:i,isLastItem:a,children:[(0,h.jsx)(A.TextBig,{children:t}),i?(0,h.jsx)(A.Icon,{name:"checkmark_s",color:"primaryAI",size:"large"}):null]},n)};function x(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function k(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?x(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):x(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const R=e=>{let{items:n=[],value:t,onChange:o}=e;return(0,h.jsx)(h.Fragment,{children:n.map(((e,i)=>{const a=e.id===t,s=i===n.length-1;return(0,h.jsx)(D,k(k({},e),{},{onClick:()=>{o(e.id)},isActive:a,isLastItem:s}),e.id)}))})},S=e=>{let{items:n,value:t,onChange:o,close:i}=e;const a=(0,r.useCallback)((e=>{o(e),i()}),[o,i]);return(0,h.jsx)(A.Flex,{width:{min:70},column:!0,border:{side:"all",color:"border"},children:(0,h.jsx)(R,{items:n,value:t,onChange:a})})};var P=t(6304),F=t(74891),Y=t(16402);const U=(0,F.A)(A.Button),N=e=>{let{showLabel:n,groupByLabel:t,tooltip:o,showValue:i,groupByValue:a,onShowChange:s,onGroupByChange:c}=e;const 
l=(0,r.useRef)(),d=(0,r.useRef)(),[u,m,,g]=(0,P.A)(),[p,f,,y]=(0,P.A)();return(0,h.jsxs)(h.Fragment,{children:[(0,h.jsxs)(A.Flex,{"data-testid":"insights-sidebar-menu-filter",alignItems:"center",gap:2,padding:[2,4,0,4],children:[(0,h.jsx)(U,{ref:l,flavour:"borderless",neutral:!0,small:!0,padding:[2,0],onClick:m,tooltip:o,children:(0,h.jsxs)(A.Flex,{alignItems:"center",gap:1,children:[(0,h.jsxs)(A.Text,{color:"menuItem",children:["Show ",(0,h.jsx)(A.Text,{color:"primaryAI",children:n})]}),(0,h.jsx)(A.Icon,{name:"chevron_left",rotate:u?1:3,size:"small",color:"text"})]})}),(0,h.jsx)(U,{ref:d,flavour:"borderless",neutral:!0,small:!0,padding:[2,0],onClick:f,tooltip:o,children:(0,h.jsxs)(A.Flex,{alignItems:"center",gap:1,children:[(0,h.jsxs)(A.Text,{color:"menuItem",children:["Group by ",(0,h.jsx)(A.Text,{color:"primaryAI",children:t})]}),(0,h.jsx)(A.Icon,{name:"chevron_left",rotate:p?1:3,size:"small",color:"text"})]})})]}),u&&null!==l&&void 0!==l&&l.current?(0,h.jsx)(A.Drop,{target:l.current,align:{top:"bottom",left:"right"},background:"modalBackground",margin:[2,0,0],round:.5,close:g,onClickOutside:g,onEsc:g,children:(0,h.jsx)(S,{items:Y.Ks,value:i,onChange:s,close:g})}):null,p&&null!==d&&void 0!==d&&d.current?(0,h.jsx)(A.Drop,{target:d.current,align:{top:"bottom",left:"right"},background:"modalBackground",margin:[2,0,0],round:.5,close:y,onClickOutside:y,onEsc:y,children:(0,h.jsx)(S,{items:Y.bS,value:a,onChange:c,close:y})}):null]})};function j(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function z(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?j(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):j(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const H=(0,f.default)(A.Flex).attrs((e=>z({padding:[2]},e))).withConfig({displayName:"menu__Section",componentId:"sc-rir5cm-0"})([""]),O=e=>{let{items:n=[],value:t,onChange:o}=e;return(0,h.jsx)(h.Fragment,{children:n.map(((e,i)=>{const a=e.id===t,s=i===n.length-1;return(0,h.jsx)(D,z(z({},e),{},{onClick:()=>{o(e.id)},isActive:a,isLastItem:s}),e.id)}))})},L=e=>{let{sortByValue:n,sortOrderValue:t,onSortByChange:o,onSortOrderChange:i}=e;return(0,h.jsxs)(A.Flex,{width:{min:70},column:!0,border:{side:"all",color:"border"},children:[(0,h.jsx)(H,{children:(0,h.jsx)(A.Text,{children:"Sort by"})}),(0,h.jsx)(O,{items:Y.Ih,value:n,onChange:o}),(0,h.jsx)(H,{children:(0,h.jsx)(A.Text,{children:"Order"})}),(0,h.jsx)(O,{items:Y.Rb,value:t,onChange:i})]})},G=(0,F.A)(A.Button),J=e=>{let{label:n,sortByValue:t,sortOrderValue:o,onSortByChange:i,onSortOrderChange:a}=e;const s=(0,r.useRef)(),[c,l,,d]=(0,P.A)();return(0,h.jsxs)(h.Fragment,{children:[(0,h.jsx)(A.Flex,{"data-testid":"insights-sidebar-menu-sort",padding:[2,4,0,4],children:(0,h.jsx)(G,{ref:s,flavour:"borderless",neutral:!0,small:!0,padding:[2,0],onClick:l,children:(0,h.jsxs)(A.Flex,{alignItems:"center",gap:1,children:[(0,h.jsx)(A.Text,{children:n}),(0,h.jsx)(A.Icon,{name:"chevron_left",rotate:c?1:3,size:"small",color:"text"})]})})}),c&&null!==s&&void 
0!==s&&s.current?(0,h.jsx)(A.Drop,{target:s.current,align:{top:"bottom",left:"right"},background:"modalBackground",margin:[2,0,0],round:.5,close:d,onClickOutside:d,onEsc:d,children:(0,h.jsx)(L,{sortByValue:t,sortOrderValue:o,onSortByChange:i,onSortOrderChange:a})}):null]})};t(72577),t(37550);var q=t(96951),K=t(51220),V=t(61704),X=t(55630),W=t(41395);function Z(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function $(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Z(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Z(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const ee=(0,F.A)(C.rk),ne={PENDING:"textLite",PROCESSING:"success",FAILED:"error",default:"neutral"},te=["PROCESSING"],oe=(0,r.memo)((e=>{let{status:n,isUnread:t,groupByValue:o,groupIndicatorVisible:i}=e;const a=(0,r.useMemo)((()=>!i&&("status"!==o||t)),[i,o,t]),s=(0,r.useMemo)((()=>({background:a?ne[n]||(t?"success":""):"",isBlinking:te.includes(n)})),[n,t,a]),A=(0,r.useMemo)((()=>a?t?"Unread report":n&&"COMPLETED"!==n?(0,W.Zr)(n.toLowerCase()):null:null),[t,n,a]);return(0,h.jsx)(ee,{tooltip:A,children:(0,h.jsx)(C.AT,$({},s))})})),ie=e=>{let{background:n}=e;return(0,h.jsx)(A.Box,{position:"absolute",top:0,bottom:0,left:0,width:"1px",background:n,noWrapper:!0})};var ae=t(35596),se=t(74518);const re=e=>{let{report:n={},groupByValue:t,sortByValue:o,flavour:i}=e;const a=(0,f.useTheme)(),s=(0,r.useMemo)((()=>{var e;return i===Y.BG.default&&(null===(e=Y.aA[o])||void 0===e?void 0:e.sortingGroupVisible)&&t===Y.fK.none}),[i,t,o]),c=(0,r.useMemo)((()=>{var e;return null===(e=Y.aA[o])||void 0===e?void 0:e.prop}),[o]),l=(0,r.useMemo)((()=>n[c]),[n,c]),d=(0,r.useMemo)((()=>{var e;return null===(e=Y.FX[o])||void 0===e?void 0:e[n["definitionName"===c?"definitionId":c]]}),[n,o,c]),u=(0,r.useMemo)((()=>(0,A.getRgbColor)(d,.05)({theme:a})),[d,a]);return{isVisible:s,groupName:l,background:d,secondaryBackground:u}};var Ae=t(3319),ce=t(25147);const le=["groupIndicatorVisible","secondaryBackground","onMouseEnter","onMouseLeave","children"],de=["report","groupByValue","sortByValue","flavour","onSelectionChange"];function ue(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function he(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?ue(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):ue(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const 
me=e=>{let{groupIndicatorVisible:n,secondaryBackground:t,onMouseEnter:o,onMouseLeave:a,children:s}=e,r=(0,i.A)(e,le);return(0,h.jsx)(A.Flex,he(he(he({position:"relative",alignItems:"center",justifyContent:"between",gap:1,padding:[2,1,2,0],onMouseEnter:o,onMouseLeave:a},n?{background:t}:{}),r),{},{children:s}))},ge=e=>{let{report:n,groupIndicatorVisible:t,secondaryBackground:o,onClick:i,onMouseEnter:a,onMouseLeave:s,isHovered:r,flavour:c,onSelectionChange:l}=e;const{id:d,childrenReports:u=[],rrule:m}=n,g=!!m,[p,f]=(0,P.A)();return c===Y.BG.chat?(0,h.jsx)(h.Fragment,{children:u.map((e=>(0,h.jsx)(pe,{report:e,flavour:c,onSelectionChange:l},e.id)))}):(0,h.jsxs)(h.Fragment,{children:[(0,h.jsxs)(me,{groupIndicatorVisible:t,secondaryBackground:o,onMouseEnter:a,onMouseLeave:s,padding:[2,1,2,4],children:[(0,h.jsxs)(A.Flex,{alignItems:"center",gap:1,children:[(0,h.jsx)(A.Icon,{name:"chevron_left",onClick:f,rotate:p?1:3,size:"small",color:"menuItem",cursor:"pointer"}),(0,h.jsxs)(A.Flex,{alignItems:"center",gap:1,onClick:i,cursor:"pointer",children:[g?(0,h.jsx)(A.Icon,{name:"clock_hollow",color:"menuItem"}):null,(0,h.jsx)(A.Text,{color:"menuItem",children:n.name}),(0,h.jsxs)(A.Text,{children:["(",u.length,")"]})]})]}),c===Y.BG.default&&r?(0,h.jsx)(ae.A,{reportId:d,status:n.status,isScheduledReport:g,flavour:"inlineMenu"}):null]}),p?(0,h.jsx)(A.Flex,{column:!0,padding:[0,0,0,4],children:u.map((e=>(0,h.jsx)(pe,{report:e,padding:[2,1,2,4],flavour:c},e.id)))}):null]})},pe=(0,r.memo)((e=>{let{report:n={},groupByValue:t,sortByValue:o,flavour:a,onSelectionChange:s}=e,c=(0,i.A)(e,de);const{id:l,name:d,status:u,isUnread:m,isScheduled:g}=n,[p,,f,y]=(0,P.A)(),{report:b,setReport:E}=(0,q.A)(),{activeConversation:w}=(0,V.A)(),B=(0,M.fT)(),C=b===l,[,,,T]=(0,M.W5)(),I=(0,X.A)({reportId:l}),{sendLog:v}=(0,Ae.A)(),_=a===Y.BG.default,Q=a===Y.BG.chat,{isSelected:D,isDisabled:x}=(0,r.useMemo)((()=>{var e;const n=(null===w||void 0===w||null===(e=w.associatedItemSelector)||void 0===e?void 0:e.selectedItems)||[],t=n.includes(l);return{isSelected:t,isDisabled:!t&&n.length===ce.C}}),[l,null===w||void 0===w?void 0:w.associatedItemSelector]),k=(0,r.useMemo)((()=>C?{color:"primary"}:he(he({},_&&"COMPLETED"===u?{cursor:"pointer"}:{}),{},{color:"menuItem"})),[C,_,u]),{isVisible:R,groupName:S,background:F,secondaryBackground:U}=re({report:n,groupByValue:t,sortByValue:o,flavour:a}),N=(0,r.useMemo)((()=>R?"".concat(d," | ").concat(S):d),[d,R,S]),j=(0,r.useCallback)((()=>{_&&("COMPLETED"===u||g)&&(T(),E(l),B(),v({feature:"Insights",description:"Report item menu click",reportId:l,reportStatus:u}))}),[l,T,u,g,_,E,B,v]),z=(0,r.useCallback)((e=>{Q&&s({value:l,checked:e})}),[l,Q,s]);return n.isScheduled?(0,h.jsx)(ge,{report:n,groupIndicatorVisible:R,secondaryBackground:U,onClick:j,onMouseEnter:f,onMouseLeave:y,isHovered:p,flavour:a,onSelectionChange:s}):(0,h.jsxs)(me,he(he({groupIndicatorVisible:R,secondaryBackground:U,onMouseEnter:f,onMouseLeave:y},c),{},{children:[R?(0,h.jsx)(ie,{background:F}):null,(0,h.jsxs)(A.Flex,{width:"230px",height:"18px",alignItems:"center",gap:1,onClick:j,children:[Q?(0,h.jsx)(A.Checkbox,{checked:D,disabled:x,onChange:z}):(0,h.jsx)(oe,{status:u,isUnread:m,groupByValue:t,groupIndicatorVisible:R}),(0,h.jsx)(se.A,he({TextComponent:A.Text,text:N,ellipsisEnd:!0},k))]}),_&&p?(0,h.jsx)(ae.A,{reportId:l,status:u,flavour:"inlineMenu",associateReport:I}):null]}))})),fe=pe,ye=["rowHeight","reports","onItemSelectionChange"];function be(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var 
o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Ee(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?be(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):be(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const we=(0,r.memo)((e=>{let{rowHeight:n=34,reports:t=[],onItemSelectionChange:o}=e,a=(0,i.A)(e,ye);const s=(0,r.useRef)(),A=(0,r.useCallback)((()=>n),[n]),c=(0,K.Te)({count:t.length,getScrollElement:()=>s.current,enableSmoothScroll:!1,estimateSize:A}),l=(()=>{const e=document.getElementById("chat-composer-tool-report-selector-container");return e?e.offsetHeight-75:0})();return(0,h.jsx)("div",{ref:s,style:Ee(Ee({overflow:"auto"},a.flavour===Y.BG.chat?l>0?{maxHeight:"".concat(l,"px")}:{}:{maxHeight:a.inGroup?"200px":"calc(100vh - 420px)"}),a.inGroup?{}:{paddingLeft:"16px"}),children:(0,h.jsx)("div",{style:{minHeight:"".concat(c.getTotalSize(),"px"),width:"100%",position:"relative"},children:c.getVirtualItems().map((e=>(0,h.jsx)("div",{style:{position:"absolute",top:0,left:0,width:"100%",transform:"translateY(".concat(e.start,"px)"),overflow:"hidden"},ref:c.measureElement,"data-index":e.index,children:(0,h.jsx)(fe,Ee({report:t[e.index],onSelectionChange:o},a))},e.key)))})})}));function Be(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Ce(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Be(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Be(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Me=(0,f.default)(A.Flex).attrs((e=>Ce({column:!0,gap:2,padding:[3,2,3,4],border:{side:"bottom",color:"border"}},e))).withConfig({displayName:"groupItem__GroupItemContainer",componentId:"sc-158b8eq-0"})([""]),Te=(0,r.memo)((e=>{let{id:n,hasUnreadReports:t}=e;const{loaded:o,definitions:i,error:a}=(0,M.A_)(),s=(0,r.useMemo)((()=>{var e;return o&&!a?null===(e=i.find((e=>e.id===n)))||void 0===e?void 0:e.name:null}),[n,o,i,a]);return o?a?(0,h.jsx)(A.Text,{color:"error",children:"Error"}):(0,h.jsx)(A.Text,{strong:!0,color:t?"primary":"text",children:s}):(0,h.jsx)(l.A,{})})),Ie=(0,r.memo)((e=>{let{label:n,groupByValue:t,hasUnreadReports:o}=e;return"type"===t?(0,h.jsx)(Te,{id:n,hasUnreadReports:o}):"status"===t?(0,h.jsxs)(A.Flex,{alignItems:"center",gap:1,children:[(0,h.jsx)(C.rk,{children:(0,h.jsx)(C.AT,{background:Y.T$[n]||""})}),(0,h.jsx)(A.Text,{strong:!0,color:"menuItem",children:(0,W.Zr)(n.toLowerCase())})]}):(0,h.jsx)(A.Text,{strong:!0,color:o?"primary":"menuItem",children:n})})),ve=(0,F.A)(A.Flex),_e=e=>{let{label:n,groupByValue:t,reports:o=[],flavour:i,onItemSelectionChange:a}=e;const{report:s}=(0,q.A)(),[c]=(0,M.x)(),[l,d,u]=(0,P.A)(),m=(0,r.useMemo)((()=>o.some((e=>{let{id:n}=e;return c.includes(n)}))),[o,c]);return(0,r.useEffect)((()=>{o&&o.find((e=>{let{id:n}=e;return 
n===s}))&&u()}),[s,o,u]),(0,h.jsxs)(Me,{"data-testid":"insights-sidebar-menu-group-container",children:[(0,h.jsxs)(ve,Ce(Ce({alignItems:"center",gap:1,cursor:"pointer",onClick:d},m&&!l?{tooltip:"You have ".concat(c.length," unread reports")}:{}),{},{children:[(0,h.jsx)(A.Icon,{name:"chevron_left",rotate:l?1:3,size:"small",color:"menuItem"}),(0,h.jsx)(Ie,{label:n,groupByValue:t,hasUnreadReports:m}),(0,h.jsxs)(A.Text,{color:"menuItem",children:["(",o.length,")"]})]})),l?(0,h.jsx)(we,{inGroup:!0,reports:o,groupByValue:t,flavour:i,onItemSelectionChange:a}):null]})},Qe=e=>{let{groupByValue:n,sortByValue:t,groups:o=[],reports:i,flavour:a,onItemSelectionChange:s}=e;return n===Y.fK.none?(0,h.jsx)(we,{reports:i,groupByValue:n,sortByValue:t,flavour:a,onItemSelectionChange:s}):(0,h.jsx)(h.Fragment,{children:o.map((e=>(0,h.jsx)(_e,{label:e,groupByValue:n,reports:i[e],flavour:a,onItemSelectionChange:s},e)))})};function De(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function xe(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?De(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):De(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const ke=e=>{let{flavour:n,menuContainerKey:t,label:o,showLabel:i,groupByLabel:a,sortLabel:s,showSorting:c,tooltip:l,groups:d,reports:u,showValue:m,groupByValue:g,sortByValue:p,sortOrderValue:f,onShowChange:y,onGroupByChange:b,onSortByChange:E,onSortOrderChange:w,onItemSelectionChange:B}=e;const M=(0,r.useMemo)((()=>n===Y.BG.chat?{height:"auto"}:{}),[n]);return(0,h.jsxs)(A.Flex,xe(xe({column:!0,gap:1,flex:"grow"},n!==Y.BG.chat?{border:{side:"top",color:"border"}}:{}),{},{children:[(0,h.jsxs)(A.Flex,{"data-testid":"insights-sidebar-menu-filter-sort-container",alignItems:"center",justifyContent:"between",children:[(0,h.jsx)(N,{label:o,showLabel:i,groupByLabel:a,tooltip:l,showValue:m,groupByValue:g,onShowChange:y,onGroupByChange:b}),c?(0,h.jsx)(J,{label:s,sortByValue:p,sortOrderValue:f,onSortByChange:E,onSortOrderChange:w}):null]}),(0,h.jsx)(T,{}),(0,h.jsx)(C.Hs,xe(xe({"data-testid":"insights-sidebar-menu-container"},M),{},{children:(0,h.jsx)(Qe,{groupByValue:g,sortByValue:p,groups:d,reports:u,flavour:n,onItemSelectionChange:B})}),t)]}))};t(46449),t(26910),t(93514),t(8872);var Re=t(427);function Se(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Pe(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Se(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Se(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Fe=()=>{const{locale:e}=(0,Re.bO)(),n=(0,r.useMemo)((()=>(e=>Array.from(Array(12).keys()).reduce(((n,t)=>Pe(Pe({},n),{},{[new Date(2025,t,1).toLocaleString(e||navigator.language,{month:"long"})]:t})),{}))(e)),[e]),[t,o]=(0,r.useState)(Y.q1),[i,a]=(0,r.useState)(Y.r5),[s,A]=(0,r.useState)(Y.ev),[c,l]=(0,r.useState)(Y.xn),d=(0,r.useMemo)((()=>{var e;return 
null===(e=Y.bS.find((e=>{let{id:n}=e;return n===i})))||void 0===e?void 0:e.prop}),[i]),{loaded:u,reports:h,allReports:m,error:g}=(0,M.KF)({prop:d,recents:t===Y.g.recents?Y.gk:0,sortBy:s,sortOrder:c}),p=(0,r.useMemo)((()=>u&&!g&&!!m.length),[u,m,g]),f=(0,r.useMemo)((()=>{var e;return null===(e=Y.Ks.find((e=>{let{id:n}=e;return n===t})))||void 0===e?void 0:e.label}),[t]),y=(0,r.useMemo)((()=>{var e;return null===(e=Y.bS.find((e=>{let{id:n}=e;return n===i})))||void 0===e?void 0:e.label}),[i]),b=(0,r.useMemo)((()=>"none"===i?f:"".concat(f," by ").concat(y)),[i,f,y]),E=(0,r.useMemo)((()=>{var e,n;const t=null===(e=Y.Ih.find((e=>{let{id:n}=e;return n===s})))||void 0===e?void 0:e.label,o=null===(n=Y.Rb.find((e=>{let{id:n}=e;return n===c})))||void 0===n?void 0:n.label;return"Sort by ".concat(t.toLowerCase()," ").concat(o.toLowerCase())}),[s,c]),w=(0,r.useMemo)((()=>i===Y.fK.none),[i]),B=(0,r.useMemo)((()=>{let e=t===Y.g.recents?"See ".concat(Y.gk," most recent reports"):"See all reports";return i===Y.fK.type?e+=" grouped by type":i===Y.fK.status?e+=" grouped by status":i===Y.fK.month&&(e+=" grouped by month created"),e}),[t,i]),C=(0,r.useMemo)((()=>{if(Array.isArray(h))return[];const e=Object.keys(h);return"month"===d?e.sort(((e,t)=>{const[o,i]=e.split(" "),[a,s]=t.split(" ");return parseInt(i,10)!==parseInt(s,10)?s-i:o!==a?n[a]-n[o]:0})):e}),[d,h,n]),T="".concat((Array.isArray(h)?h:Object.values(h).flat()).map((e=>{let{id:n}=e;return n})),"-sortBy-").concat(s),I=(0,r.useCallback)((()=>{}),[]);return{key:T,label:b,showLabel:f,groupByLabel:y,sortLabel:E,showSorting:w,tooltip:B,groups:C,loaded:u,hasReports:p,reports:h,error:g,showValue:t,groupByValue:i,sortByValue:s,sortOrderValue:c,onShowChange:o,onGroupByChange:a,onSortByChange:A,onSortOrderChange:l,onClick:I}},Ye=e=>{let{flavour:n}=e;return(0,h.jsx)(A.Flex,{column:!0,gap:5,alignItems:"center",justifyContent:"center",padding:[8,4],children:n===Y.BG.chat?(0,h.jsx)(h.Fragment,{children:(0,h.jsx)(A.Text,{textAlign:"center",children:"No reports found."})}):(0,h.jsxs)(h.Fragment,{children:[(0,h.jsx)(A.Text,{textAlign:"center",children:"No reports yet! 
Click one of the buttons below to create your first one."}),(0,h.jsx)(B,{})]})})},Ue=["key","hasReports"];function Ne(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function je(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Ne(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Ne(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const ze=function(){let{flavour:e=Y.BG.default,onItemSelectionChange:n=s()}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const t=Fe(),{key:o,hasReports:a}=t,r=(0,i.A)(t,Ue);return a?(0,h.jsxs)(h.Fragment,{children:[e===Y.BG.default?(0,h.jsx)(B,{}):null,(0,h.jsx)(ke,je({flavour:e,menuContainerKey:o,onItemSelectionChange:n},r))]}):(0,h.jsx)(Ye,{flavour:e})}},25383(e,n,t){"use strict";t.d(n,{A:()=>r});t(62953);var o=t(96540),i=t(81685),a=t(98783),s=t(96951);const r=()=>{const{loaded:e,definitions:n,error:t}=(0,i.A_)(),{onNewQuery:r}=(0,i.g0)(),A=(0,a.A)(),{reset:c}=(0,s.A)(),[,,,l]=(0,i.W5)();return{loaded:e,definitions:n,error:t,onButtonClick:(0,o.useCallback)((e=>{c(),l(),r(e,{context:A})}),[r,A,c,l])}}},79997(e,n,t){"use strict";t.d(n,{AQ:()=>D,AT:()=>Q,BN:()=>f,DJ:()=>B,Hs:()=>y,J3:()=>b,Mx:()=>C,RB:()=>T,VW:()=>w,_V:()=>m,jI:()=>I,oQ:()=>M,oe:()=>g,p6:()=>p,qm:()=>x,rk:()=>_,yB:()=>E});t(98992),t(54520),t(3949);var o=t(80045),i=t(64467),a=t(51510),s=t(42358),r=t(99094);const A=["isActive"],c=["isActive"],l=["collapsed"],d=["itemsNum"];function u(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function h(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?u(Object(t),!0).forEach((function(n){(0,i.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):u(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const m=(0,a.default)(s.Box).attrs((e=>h({height:"100%"},e))).withConfig({displayName:"styled__LayoutContainer",componentId:"sc-v88w2r-0"})(["display:grid;grid-template-columns:430px auto;gap:8px;"]),g=(0,a.default)(s.Flex).attrs((e=>h({position:"relative",height:"100%",column:!0,flex:"grow",border:{side:"right",color:"border"},overflow:{vertical:"auto"},padding:[4,0,0,0]},e))).withConfig({displayName:"styled__SidebarContainer",componentId:"sc-v88w2r-1"})([""]),p=(0,a.default)(s.Flex).attrs((e=>h({position:"absolute",top:"12px",right:"12px",alignItems:"center",gap:2},e))).withConfig({displayName:"styled__LabelsContainer",componentId:"sc-v88w2r-2"})([""]),f=(0,a.default)(s.Flex).attrs((e=>h({alignItems:"center",gap:2,padding:[0,4]},e))).withConfig({displayName:"styled__SidebarSearch",componentId:"sc-v88w2r-3"})([""]),y=(0,a.default)(s.Flex).attrs((e=>h({column:!0,height:"calc(100vh - 
420px)",overflow:{vertical:"auto"}},e))).withConfig({displayName:"styled__SidebarMenuContainer",componentId:"sc-v88w2r-4"})([""]),b=((0,a.default)(s.Flex).attrs((e=>h({column:!0,gap:2,padding:[4],border:{side:"bottom",color:"border"}},e))).withConfig({displayName:"styled__SidebarMenuItemContainer",componentId:"sc-v88w2r-5"})([""]),(0,a.default)(s.Flex).attrs((e=>h({column:!0,overflow:{vertical:"auto"}},e))).withConfig({displayName:"styled__MainContainer",componentId:"sc-v88w2r-6"})([""])),E=(0,a.default)(s.Box).withConfig({displayName:"styled__ReportBodyContainer",componentId:"sc-v88w2r-7"})(["display:grid;grid-template-columns:220px auto;gap:16px;"]),w=(0,a.default)(s.Flex).attrs((e=>{let{isActive:n}=e;return h({border:{side:"left",color:n?"primary":"border"}},(0,o.A)(e,A))})).withConfig({displayName:"styled__TocItemContainer",componentId:"sc-v88w2r-8"})(["&:hover{border-color:",";}"],(0,s.getColor)("primary")),B=(0,a.default)(s.Text).attrs((e=>{let{isActive:n}=e;return h({color:n?"primary":"text",padding:[2,4]},(0,o.A)(e,c))})).withConfig({displayName:"styled__TocItemText",componentId:"sc-v88w2r-9"})(["&:hover{text-decoration:none;}"]),C=(0,a.default)(s.Flex).attrs((e=>{let{collapsed:n}=e,t=(0,o.A)(e,l);return h(h({gap:4},n?{}:{height:"".concat(r.xV,"px"),border:{side:"top",color:"border"}}),t)})).withConfig({displayName:"styled__RecentReportsContainer",componentId:"sc-v88w2r-10"})([""]),M=(0,a.default)(s.Flex).attrs((e=>{let{itemsNum:n}=e,t=(0,o.A)(e,d);return h({width:"".concat(100/n,"%"),height:"".concat(r.x0,"px"),column:!0,gap:3,padding:[4],background:"panelBg",round:!0,justifyContent:"between"},t)})).withConfig({displayName:"styled__ReportPreviewItem",componentId:"sc-v88w2r-11"})([""]),T=(0,a.default)(s.Flex).attrs((e=>h({position:"relative",height:"calc(100% - 20px)",column:!0,gap:3,overflow:"hidden"},e))).withConfig({displayName:"styled__ReportPreviewItemContent",componentId:"sc-v88w2r-12"})([""]),I=(0,a.default)(s.Box).attrs((e=>h({position:"absolute",bottom:0,width:"100%",height:"20px"},e))).withConfig({displayName:"styled__ReportPreviewItemContentShadow",componentId:"sc-v88w2r-13"})(["background:linear-gradient(to bottom,transparent 40%,"," 90%);"],(0,s.getColor)("panelBg")),v=(0,a.css)(["animation:",";@keyframes blinking{0%{opacity:0;}50%{opacity:1;}100%{opacity:0;}}"],(e=>{let{isBlinking:n}=e;return n?"blinking 1.6s ease-in infinite":""})),_=(0,a.default)(s.Flex).attrs((e=>h({alignItems:"center",justifyContent:"center",padding:[1]},e))).withConfig({displayName:"styled__ReportStatusDotContainer",componentId:"sc-v88w2r-14"})([""]),Q=(0,a.default)(s.Box).attrs((e=>h({width:"6px",height:"6px",round:"50%"},e))).withConfig({displayName:"styled__ReportStatusDot",componentId:"sc-v88w2r-15"})(["",""],v),D=(0,a.default)(s.Icon).withConfig({displayName:"styled__IconBlinking",componentId:"sc-v88w2r-16"})(["",""],v),x=(0,a.default)(s.Box).attrs((e=>h({padding:[2,4],round:4},e))).withConfig({displayName:"styled__TextBubble",componentId:"sc-v88w2r-17"})([""])},96951(e,n,t){"use strict";t.d(n,{A:()=>A});var o=t(96540),i=t(41344),a=t(24609),s=t(19186),r=t(99094);const A=()=>{const e=(0,a.bq)(),n=(0,s.QW)(),t=(0,i.Zp)(),A=(0,i.RQ)("/spaces/:spaceSlug/rooms/:roomSlug/insights/report-preview/:reportId"),{reportId:c}=(null===A||void 0===A?void 
0:A.params)||{};return{report:c,setReport:(0,o.useCallback)((o=>{t("/spaces/".concat(e,"/rooms/").concat(n,"/").concat(r.WJ,"/").concat(o))}),[e,n,t]),reset:(0,o.useCallback)((()=>{t("/spaces/".concat(e,"/rooms/").concat(n,"/insights"))}),[e,n,t])}}},42849(e,n,t){"use strict";t.d(n,{A:()=>a});var o=t(64587),i=t(79022);const a=e=>{let{reportDate:n,showTime:t=!0,dateOptions:a={},timeOptions:s={}}=e;const{localeDateString:r,localeTimeString:A}=(0,o.$j)();return(0,i.PP)({reportDate:n,showTime:t,dateOptions:a,timeOptions:s,localeDateString:r,localeTimeString:A})}},75250(e,n,t){"use strict";t.d(n,{A:()=>a});var o=t(81685),i=t(79022);const a=e=>{let{id:n}=e;const{loaded:t,report:a}=(0,o.ef)({id:n}),{loaded:s,report:r}=(0,o.Xh)({id:n}),A=t&&a?a:s&&r?r:null;return A?(null===A||void 0===A?void 0:A.name)||(0,i.Gb)({report:A}):""}},50100(e,n,t){"use strict";t.d(n,{A:()=>r});t(62953);var o=t(96540),i=t(41344),a=t(61841),s=t(99094);const r=()=>{const e=(0,i.Zp)(),[n,t]=(0,a.Ay)();return{goToReport:(0,o.useCallback)((o=>{o&&e("/spaces/".concat(n,"/rooms/").concat(t,"/").concat(s.p9,"/").concat(o))}),[n,t,e]),getUrl:(0,o.useCallback)((e=>{const{origin:o}=window.location;return"".concat(o,"/spaces/").concat(n,"/rooms/").concat(t,"/").concat(s.p9,"/").concat(e)}),[n,t])}}},22794(e,n,t){"use strict";t.d(n,{A:()=>te});t(98992),t(54520),t(3949);var o=t(64467),i=t(96540),a=t(54852),s=t(80045),r=t(51510),A=t(42358),c=t(79748),l=t(30005),d=t(37846),u=t(92318),h=t(74848);const m=["level"],g=["children","href"],p=["content"],f=["children"],y=["children"],b=["ordered","children"],E=["children"];function w(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function B(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?w(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):w(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const C=r.default.article.withConfig({displayName:"nodes__StyledArticle",componentId:"sc-z8xzec-0"})(["padding-right:8px;padding-bottom:16px;"]),M=r.default.p.withConfig({displayName:"nodes__StyledParagraph",componentId:"sc-z8xzec-1"})(["margin-top:14px;line-height:1.5;"]),T=(0,r.default)(l.Ay).withConfig({displayName:"nodes__StyledCommand",componentId:"sc-z8xzec-2"})(["padding:8px;padding-right:32px;margin-top:8px;"]),I=r.default.ol.withConfig({displayName:"nodes__OrderedList",componentId:"sc-z8xzec-3"})(["list-style:roman;padding-left:16px;line-height:2;"]),v=r.default.ul.withConfig({displayName:"nodes__UnorderedList",componentId:"sc-z8xzec-4"})(['list-style-image:url("','/img/list-style-image.svg");padding-left:16px;line-height:2;'],u.A.assetsBaseURL),_={document:{render:C},heading:{render:e=>{let{level:n=1}=e,t=(0,s.A)(e,m);const o={1:A.H1,2:A.H2,3:A.H3,4:A.H4,5:A.H5,6:A.H6}[n]||A.H1,i=n<=3?6-n:2;return(0,h.jsx)(o,B({margin:[i,0,2,0]},t))},attributes:{id:{type:String},level:{type:Number}}},paragraph:{render:M},link:{render:e=>{let{children:n,href:t}=e,o=(0,s.A)(e,g);return(0,h.jsx)(c.A,B(B({href:t,rel:"noopener 
noreferrer",target:"_blank"},o),{},{children:n}))},attributes:{href:{type:String}}},code:{render:e=>{let{content:n}=e,t=(0,s.A)(e,p);return(0,h.jsx)(l.R0,B(B({as:"span"},t),{},{children:n}))},attributes:{content:{type:String}}},fence:{render:T,attributes:{content:{type:String}}},list:{render:e=>{let{ordered:n,children:t}=e,o=(0,s.A)(e,b);const i=n?I:v;return(0,h.jsx)(i,B(B({},o),{},{children:t}))},attributes:{ordered:{type:Boolean}}},table:{render:e=>{let{children:n}=e,t=(0,s.A)(e,E);return(0,h.jsx)(d.gY,B(B({},t),{},{children:n}))}},strong:{render:e=>{let{children:n}=e,t=(0,s.A)(e,f);return(0,h.jsx)(A.Text,B(B({strong:!0,fontSize:"14px"},t),{},{children:n}))}},blockquote:{render:e=>{let{children:n}=e,t=(0,s.A)(e,y);return(0,h.jsx)(A.Flex,B(B({padding:[0,4,3,4],background:"modalTabsBackground",border:{side:"left",size:"3px",color:"border"}},t),{},{children:n}))}}},Q=e=>{let{summary:n="",open:t=!1,children:o}=e;return(0,h.jsx)(A.Flex,{margin:[2,0,0,0],children:(0,h.jsxs)("details",{open:t,children:[(0,h.jsx)("summary",{children:(0,h.jsx)(A.Text,{strong:!0,children:n})}),o]})})};t(33811),t(86994),t(81454),t(41795),t(62953);var D=t(67723);const x=e=>{let{children:n}=e;const[t,o]=(0,i.useState)(0),a=i.Children.toArray(n).map((e=>{let{props:n}=e;return n}));return(0,h.jsxs)(A.Flex,{column:!0,width:"100%",margin:[4,0,0,0],children:[(0,h.jsx)(A.Flex,{border:{side:"bottom",size:"1px",color:"borderSecondary"},padding:[0,4],children:(0,h.jsx)(A.Tabs,{selected:t,onChange:o,children:a.map((e=>{let{label:n}=e;return(0,h.jsx)(A.Tab,{label:(0,h.jsx)(A.Text,{children:n}),minWidth:"auto",maxWidth:"auto",padding:[1,4],background:"mainBackground",small:!0},n)}))})}),(0,h.jsx)(A.Flex,{flex:!0,children:(0,h.jsx)(D.A,{noMargin:!0,children:a[t].children})})]})};var k=t(42790),R=t(41344),S=t(24609),P=t(61661),F=t(32465);const Y=e=>{let{children:n,onClick:t}=e;return(0,h.jsx)(A.Box,{as:"a",cursor:"pointer",onClick:t,children:n})},U=e=>{let{categoryId:n,navigateToSettings:t,children:o}=e;const a=(0,R.Zp)(),s=(0,k.rI)("selectedIntegrationCategory"),r=(0,S.bq)(),{pushCategory:A}=(0,P.b8)(),c=(0,F.A)(),l=(0,i.useCallback)((()=>{t?(s("deploy.docker-kubernetes"),a({pathname:"/spaces/".concat(r,"/settings/integrations"),replace:!0})):A(c(n))}),[A,n,c,t]);return(0,h.jsx)(Y,{onClick:l,children:o})},N=(0,i.memo)(U);t(27495),t(25440),t(79978),t(72577);var j=t(46587),z=t(99728);const H=e=>{let{showClaimingOptions:n,command:t="",claimToken:o="",claimUrl:i="",claimRooms:a=""}=e;if(n)return t.replaceAll(/{% if \$showClaimingOptions %}\n?/gi,"").replaceAll(/{% \/if %}\n?/gi,"").replaceAll(/{% claim_token %}/gi,o).replaceAll(/{% claim_url %}/gi,i).replaceAll(/{% \$claim_rooms %}/gi,a);return t.replaceAll(/{%\s*if\s*\$showClaimingOptions\s*%}[\s\S]*?{%\s*\/if\s*%}\n?/g,"")},O=e=>{var n;let{methods:t,isNightly:o,claimToken:i="",claimUrl:a="",claimRooms:s=""}=e;const r=o?"nightly":"stable",{showClaimingOptions:c}=(()=>{const e=(0,j.uW)("isAnonymous"),n=(0,z.JT)("node:Create");return{showClaimingOptions:!e&&n}})();return t.length?c&&!i?(0,h.jsx)(A.Flex,{padding:[8,0],children:(0,h.jsx)(A.Text,{children:"Loading token..."})}):t.length>1?(0,h.jsx)(x,{children:t.map((e=>{let{method:n,commands:t}=e;const{command:o}=t.find((e=>e.channel==r))||{};return(0,h.jsx)(A.Tab,{label:n,children:H({showClaimingOptions:c,command:o,claimToken:i,claimUrl:a,claimRooms:s})},n)}))}):(0,h.jsx)(D.A,{children:H({showClaimingOptions:c,command:((null===(n=t[0])||void 0===n?void 
0:n.commands.find((e=>e.channel==r)))||{}).command||"",claimToken:i,claimUrl:a,claimRooms:s})}):null};var L=t(55024),G=t(34412);const J=["label"];function q(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function K(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?q(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):q(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const V=(0,r.default)(G.D).attrs((e=>K({cursor:"pointer"},e))).withConfig({displayName:"aiCredits__AccessorText",componentId:"sc-1l3ljy4-0"})(["text-decoration:underline;"]),X=e=>{let{label:n}=e,t=(0,s.A)(e,J);return(0,h.jsx)(V,K(K({},t),{},{children:n}))},W=e=>{let{label:n}=e;return(0,h.jsx)(L.A,{Accessor:X,label:n})},Z={nodes:_,tags:{details:{render:Q,attributes:{summary:{type:String,default:""},open:{type:Boolean,default:!1}}},tabs:{render:x},tab:{render:A.Tab,attributes:{label:{type:String,default:""}}},terminal:{render:D.A},goToCategory:{render:N,attributes:{categoryId:{type:String,default:""},navigateToSettings:{type:Boolean,default:!1}}},command:{render:O,attributes:{methods:{type:Array,default:[]},isNightly:{type:Boolean,default:!1},claimToken:{type:String,default:""},claimUrl:{type:String,default:""},claimRooms:{type:String,default:""}}},aiCredits:{render:W,attributes:{label:{type:String,default:"AI Credits"}}}}};function $(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function ee(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?$(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):$(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const ne=e=>{let{config:n={},children:t}=e;return(0,h.jsx)(a.A,{transformConfiguration:ee(ee({},Z),n),children:t})},te=(0,i.memo)(ne)},19844(e,n,t){"use strict";t.d(n,{f:()=>U,A:()=>H});var o=t(64467),i=t(80045),a=(t(98992),t(54520),t(3949),t(62953),t(96540)),s=t(42358),r=t(95565),A=t(38773),c=t(51262),l=t(60908),d=t(74848);const u=()=>{const{loaded:e,hasLimitations:n,maxNodes:t,preferredNodes:o}=(0,c.A)();return n&&e&&o.length==t?(0,d.jsx)(l.Ay,{feature:"ActiveNodesLimitWarning",children:(0,d.jsxs)(s.Flex,{alignItems:"center",gap:2,children:[(0,d.jsx)(s.Flex,{children:(0,d.jsx)(s.Icon,{color:"warning",name:"warning_triangle"})}),(0,d.jsxs)(s.TextBig,{children:["If you connect a new node you'll"," ",(0,d.jsxs)(s.TextBig,{strong:!0,children:["exceed the limit of ",t," active Nodes"]}),". 
To unblock the new node, either"," ",(0,d.jsx)(r.A,{children:(0,d.jsx)(s.TextBig,{color:"primary",children:"upgrade to the Business plan"})})," ","for unlimited access or"," ",(0,d.jsx)(A.A,{children:(0,d.jsx)(s.TextBig,{color:"primary",children:"review your Space active Nodes on the Settings page"})}),"."]})]})}):null},h=(0,a.memo)(u);var m=t(24609),g=t(99728),p=t(39175);const f=()=>{const e=(0,m.vt)(),n=(0,p.ES)(e),t=(0,g.JT)("node:Create");return n||t?null:(0,d.jsx)(l.Ay,{feature:"IntegrationsNodeCreatePermissionWarning",children:(0,d.jsxs)(s.Flex,{gap:2,background:"warningSemi",padding:[2],children:[(0,d.jsx)(s.Icon,{size:"medium",color:"text",name:"warning_triangle"}),(0,d.jsx)(s.TextBig,{children:"You don't have permissions to connect new nodes to the Space. Please contact an administrator to do it or just install Netdata on your node."})]})})};var y=t(22794),b=t(55429),E=t(19186),w=t(29645);function B(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const C=(0,t(51510).default)(s.Collapsible).attrs((e=>function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?B(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):B(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({column:!0},e))).withConfig({displayName:"group__StyledCollapsible",componentId:"sc-shp4vh-0"})(["border-style:solid;border-color:",";border-left-width:1px;border-right-width:1px;border-bottom-width:1px;",""],(0,s.getColor)("successSemi"),(e=>{let{open:n}=e;return n?"":"border: none;"})),M=e=>{let{title:n,isOpen:t,onClick:o}=e;return(0,d.jsxs)(s.Flex,{alignItems:"center",justifyContent:"between",padding:[2],border:{side:"all",color:"successSemi"},cursor:"pointer",onClick:o,children:[(0,d.jsx)(s.Text,{color:"primary",children:n}),(0,d.jsx)(s.Icon,{name:"chevron_left",size:"small",color:"primary",rotate:t?1:3})]})},T=e=>{let{title:n,isOpen:t,toggleOpen:o,children:i}=e;return(0,d.jsxs)(s.Flex,{column:!0,children:[(0,d.jsx)(M,{title:n,isOpen:t,onClick:o}),(0,d.jsx)(C,{open:t,children:i})]})};t(81454);var I=t(30005);function v(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function _(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?v(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):v(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const 
Q={background:"pnlBackground",padding:[.5,8,.5,1],border:"none",iconContainerProps:{right:"0",bottom:"4px"}},D=e=>{let{items:n=[]}=e;return(0,d.jsx)(d.Fragment,{children:n.map((e=>{let{name:n,commandText:t}=e;return(0,d.jsxs)(s.Flex,{alignItems:"center",children:[(0,d.jsx)(s.Flex,{width:"90px",padding:[1,1,1,2],children:(0,d.jsx)(s.TextSmall,{color:"menuItem",children:n})}),(0,d.jsx)(s.Flex,{padding:[1,2,1,1],children:(0,d.jsx)(I.Ay,_(_({commandText:t},Q),{},{children:(0,d.jsx)(s.TextSmall,{color:"menuItem",children:t})}))})]},n)}))})},x=(0,a.memo)(D),k=e=>{let{items:n=[]}=e;return(0,d.jsx)(d.Fragment,{children:n.map((e=>{let{icon:n,text:t}=e;return(0,d.jsxs)(s.Flex,{padding:[1,2],gap:2,alignItems:"center",children:[(0,d.jsx)(s.Icon,{name:n,color:"menuItem",size:"small"}),(0,d.jsx)(s.TextSmall,{color:"menuItem",children:t})]},t)}))})},R=(0,a.memo)(k);var S=t(6304);const P=["startLoading"];function F(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Y(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?F(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):F(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const U=(0,a.createContext)(),N=(j=y.A,e=>{let{startLoading:n}=e,t=(0,i.A)(e,P);return"function"===typeof n?(0,d.jsx)(U.Provider,{value:{startLoading:n},children:(0,d.jsx)(j,Y({},t))}):(0,d.jsx)(j,Y({},t))});var j;const z=[{icon:"networkingStack",text:"Installs dependencies using your system's package manager."},{icon:"download",text:"Downloads Netdata and verifies checksum for security."},{icon:"gear",text:"Runs the installer and sets it up as a system service."}],H=e=>{let{integration:n={},rooms:t=[],navigateToSettings:o,startLoading:i,detailsOpen:r=!0}=e;const{deployContent:A,methods:c}=n,l=(0,m.vt)(),u=(0,E.pr)(),g=(0,E.XA)(),y=null!==g&&void 0!==g&&g.loaded?g:u,B=null===y||void 0===y?void 0:y.id,C=(0,b.A)(l),M=window.envSettings.apiUrl,I=(0,p.ng)(String(t.length?t.join(","):B)),[v,_]=(0,S.A)(),[Q,D]=(0,S.A)(r),k=(0,w.a)(),P=(0,a.useMemo)((()=>"nightly"==k),[k]),F=(0,a.useMemo)((()=>({variables:{methods:c,isNightly:P,claimToken:null===C||void 0===C?void 0:C.token,claimUrl:M,claimRooms:I,navigateToSettings:o}})),[c,P,C,M,I,o]),Y=(0,a.useMemo)((()=>[{name:"Claim Token",commandText:null===C||void 0===C?void 0:C.token},{name:"Claim URL",commandText:M},{name:"Room IDs",commandText:I}]),[C,M,I]);return(0,d.jsxs)(s.Flex,{width:"100%",column:!0,padding:[4,0,0,0],children:[(0,d.jsx)(h,{}),(0,d.jsx)(f,{}),(0,d.jsx)(N,{config:F,startLoading:i,children:A}),(0,d.jsx)(T,{title:"What does this do?",isOpen:v,toggleOpen:_,children:(0,d.jsx)(R,{items:z})}),!(null===C||void 0===C||!C.token)&&(0,d.jsx)(T,{title:"Show claiming details",isOpen:Q,toggleOpen:D,children:(0,d.jsx)(x,{items:Y})})]})}},29147(e,n,t){"use strict";t.d(n,{A:()=>v});var o=t(64467),i=(t(89463),t(98992),t(54520),t(3949),t(62953),t(96540)),a=t(42358),s=t(79748),r=t(45087),A=t(74891),c=t(77148),l=t(1174),d=(t(9391),t(30569)),u=t(24609),h=t(84308),m=t(46391),g=t(6304),p=t(63872);const f=()=>{const e=(0,u.vt)(),[n,,t,o]=(0,g.A)(),[a,s]=(0,p.A)();return{loading:n,recycleToken:(0,d.yF)((0,i.useCallback)(((n,i)=>{t(),(0,h.X)(e).then((n=>{let{data:t}=n;i((0,m.A)(e),t),a({text:"Successfully 
expired last token and created a new one."})})).catch(s).finally((()=>{o()}))}),[e,t,o,a,s]))}};var y=t(29645),b=t(52419),E=t(94404),w=t(74848);function B(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function C(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?B(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):B(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const M=(0,E.A)((0,A.A)(a.Button)),T={left:"stable",right:"nightly"},I=e=>{let{right:n}=e;const{title:t,description:o}=b.d[T[n?"right":"left"]]||{};return(0,w.jsx)(r.A,{content:o,align:"bottom",children:(0,w.jsx)(a.Text,{padding:n?[0,0,0,1]:[0,1,0,0],children:t})})},v=function(){let{vertical:e,nested:n}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const[t,o]=(0,y.$)(),{loading:r,recycleToken:A}=f(),d=(0,i.useMemo)((()=>C({column:e,gap:e?2:4},e?{width:"100%"}:{})),[e]),u=(0,i.useCallback)((e=>{const n=e.target.checked?"nightly":"stable";o(n)}),[o]);return(0,i.useEffect)((()=>{n||o(null)}),[n,o]),(0,w.jsxs)(a.Flex,C(C({},d),{},{children:[(0,w.jsxs)(a.Flex,{column:!0,gap:1,children:[(0,w.jsx)(s.A,{href:"https://learn.netdata.cloud/docs/getting-started/install-netdata#nightly-vs-stable-releases",rel:"noopener noreferrer",target:"_blank",children:(0,w.jsxs)(a.Flex,{alignItems:"center",gap:1,children:[(0,w.jsx)(a.Text,{color:"primary",children:"Updates channel"}),(0,w.jsx)(a.Icon,{color:"primary",name:"goToNode",width:"18px",height:"18px"})]})}),(0,w.jsx)(a.Toggle,{labelLeft:"Stable",labelRight:"Nightly",Label:I,colored:!1,checked:"nightly"==t,disabled:!1,onChange:u})]}),(0,w.jsx)(c.A,{color:"border",vertical:!e}),(0,w.jsx)(l.A,{permission:"node:Create",children:e=>(0,w.jsx)(M,{width:"100%",feature:"ClaimingToken",label:"Regenerate Token",flavour:"hollow",icon:"refresh",onClick:A,tooltip:e?"Expire the current token and claim a new one.":"Only admins can regenerate the token.",disabled:r||!e})})]}))}},67723(e,n,t){"use strict";t.d(n,{A:()=>C});t(98992),t(54520),t(3949);var o=t(64467),i=t(80045),a=t(96540),s=t(51510),r=t(42358);function A(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const c=(0,s.default)(r.Flex).attrs((e=>function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?A(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):A(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({column:!0,padding:[2,4,4,2]},e))).withConfig({displayName:"styled__StyledTerminalCommand",componentId:"sc-3zs5xg-0"})(["position:relative;top:-1px;color:",";background:",";border:1px solid ",";cursor:pointer;overflow-wrap:anywhere;white-space:pre-wrap;width:100%;font-family:monospace;font-weight:bold;letter-spacing:0.09px;line-height:16px;font-size:14px;word-break:break-word;overflow-y:auto;margin-top:",";"],(0,r.getColor)("primary"),(0,r.getColor)("terminalGreen"),(0,r.getColor)("terminalGreenBorder"),(e=>{let{noMargin:n}=e;return 
n?"0":"16px"})),l=(0,s.default)(r.Icon).withConfig({displayName:"styled__StyledIcon",componentId:"sc-3zs5xg-1"})(["display:flex;align-self:flex-end;cursor:pointer;position:absolute;bottom:8px;right:8px;"]);var d=t(35184),u=t(74848);const h=["blurred","blurProps"];function m(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function g(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?m(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):m(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}var p=t(93234),f=t(88307),y=t(3319),b=t(19844);const E=["children","logProps"];function w(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function B(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?w(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):w(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const C=(M=e=>{let{children:n,logProps:t={}}=e,o=(0,i.A)(e,E);const s=(0,a.useContext)(b.f),r=(0,a.useMemo)((()=>(0,p.jU)(n)),[n]),{sendLog:A,isReady:d}=(0,y.A)(),h=(0,a.useCallback)((()=>{var e;(0,f.C)(r,{text:"Config copied to your clipboard."})(),null===s||void 0===s||null===(e=s.startLoading)||void 0===e||e.call(s),A(B({feature:"ConnectNode",isStart:!0,copyString:r},t))}),[d,r,s]);return(0,u.jsxs)(c,B(B({onClick:h},o),{},{children:[n,(0,u.jsx)(l,{name:"copy",size:"small",color:"textLite"})]}))},e=>{let{blurred:n,blurProps:t}=e,o=(0,i.A)(e,h);return n?(0,u.jsx)(d.Ay,g(g({},t),{},{children:(0,u.jsx)(M,g({},o))})):(0,u.jsx)(M,g({},o))});var M},75616(e,n,t){"use strict";t.d(n,{A:()=>B});var o=t(64467),i=(t(98992),t(54520),t(3949),t(62953),t(96540)),a=t(51510),s=t(63950),r=t.n(s),A=t(42358),c=t(24609),l=t(3561),d=t(6304),u=t(52353),h=t(74891),m=t(3319),g=t(74848);function p(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function f(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?p(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):p(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const y=(0,h.A)(A.Button),b=e=>{let{name:n,onEdit:t=r()}=e;return(0,g.jsxs)(A.Flex,{alignItems:"center",gap:2,children:[(0,g.jsx)(A.TextBig,{color:"pnlText",children:n}),(0,g.jsx)(y,{icon:"pencilOutline",flavour:"borderless",iconColor:"text",iconSize:"small",padding:[0],onClick:t,tooltip:"Edit space name"})]})},E=(0,a.default)(A.Flex).withConfig({displayName:"spaceInfo__StyledWrapper",componentId:"sc-166ce7m-0"})(["margin-top:-3px !important;margin-bottom:-3px 
!important;"]),w=e=>{let{name:n,setName:t=r(),onSave:o=r(),onCancel:a=r(),isLoading:s}=e;const[c,l]=(0,i.useState)(),d=(0,i.useMemo)((()=>(0,u.fc)(n)),[n]);return(0,g.jsxs)(E,{gap:2,children:[(0,g.jsx)(A.TextInput,{value:n,onChange:e=>{l(!0),t(e.target.value)},onKeyDown:e=>{let{code:n}=e;["Enter","NumpadEnter"].includes(n)&&o()},disabled:s,size:"small",error:u.xc[d],isDirty:c,instantFeedback:"all",background:"pnlBackground",border:"none"}),(0,g.jsx)(A.Button,{label:"Save",flavour:"borderless",iconColor:"text",iconSize:"small",padding:[0],onClick:o,disabled:s||!!d}),(0,g.jsx)(A.Button,{label:"Cancel",flavour:"borderless",iconColor:"text",iconSize:"small",padding:[0],onClick:a,disabled:s})]})},B=function(){let{onStateChange:e}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{id:n,name:t}=(0,c.ap)(),o=(0,l.A)(n),[a,,s,r]=(0,d.A)(),[A,,u,h]=(0,d.A)(),[p,y]=(0,i.useState)(t),{sendLog:E}=(0,m.A)(),B=(0,i.useCallback)((e=>{r(),h(),e&&E(e)}),[r,h,E]),C=(0,i.useCallback)((e=>{r(),e&&E(e)}),[r,E]),M=(0,i.useCallback)((()=>{const e={feature:"SpaceInfo",oldName:t,newName:p};s(),o({name:p},{onSuccess:()=>B(f(f({},e),{},{isSuccess:!0,description:"SUCCESS - Change space name"})),onFail:()=>C(f(f({},e),{},{isFailure:!0,description:"ERROR - Change space name"}))})}),[t,p,o,s,B,C,E]);return(0,i.useEffect)((()=>{e&&e({isEditable:A})}),[A,e]),A?(0,g.jsx)(w,{name:p,setName:y,onSave:M,onCancel:h,isLoading:a}):(0,g.jsx)(b,{name:t,onEdit:u})}},14815(e,n,t){"use strict";t.d(n,{V0:()=>J,vD:()=>$,jI:()=>ee,qC:()=>q,Jr:()=>Z,v2:()=>K,D_:()=>W,yI:()=>G,rP:()=>X,aw:()=>V});var o=t(64467),i=(t(98992),t(54520),t(3949),t(62953),t(96540)),a=t(63950),s=t.n(a),r=t(42358),A=t(32742),c=t(75616),l=t(45087),d=t(89590),u=t(79748),h=t(24609),m=t(19186),g=t(98545),p=t(30403),f=t(24285),y=t(99728),b=t(92862),E=t(6304),w=t(94404),B=t(74848);function C(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function M(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?C(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):C(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const T=(0,w.A)(r.Button),I={light:"primary",dark:"white",unspecified:"primary"},v=e=>{let{onClick:n}=e;const t={icon:"add_user",flavour:"borderless",onClick:n,label:"Invite your team","data-ga":"integrations::click-invite-team::header"};return(0,y.JT)("space:InviteUser")?(0,B.jsx)(T,M({},t)):(0,B.jsx)(l.A,{content:"You can't invite a user with your current permissions",children:(0,B.jsx)(r.Flex,{children:(0,B.jsx)(T,M(M({},t),{},{disabled:!0}))})})},_=e=>{let{nodesCount:n,onClick:t=s()}=e;const o=(0,y.JT)("node:Create"),a=(0,i.useMemo)((()=>({onClick:t,disabled:!o})),[o]),A=n>0?"Connect more!":"Connect a node";return o?(0,B.jsx)(u.A,M(M({},a),{},{children:A})):(0,B.jsx)(l.A,{content:"You don't have permission to connect new nodes. 
Please contact an administrator to do it",children:(0,B.jsx)(r.Box,{children:(0,B.jsx)(u.A,M(M({},a),{},{children:A}))})},Q=e=>{let{nodesCount:n,onConnectClick:t=s()}=e;return(0,B.jsxs)(r.Flex,{gap:2,children:[(0,B.jsx)(_,{nodesCount:n,onClick:t}),(0,B.jsx)(r.Text,{children:"to get started"})]})},D=e=>{let{nodesCount:n,onConnectClick:t=s()}=e;return(0,B.jsxs)(r.Flex,{gap:2,children:[(0,B.jsxs)(r.Text,{children:["You have connected ",n," node",1===n?"":"s","."]}),(0,B.jsx)(_,{nodesCount:n,onClick:t})]})},x=(0,i.memo)((e=>{let{nodesCount:n,onConnectClick:t=s()}=e;return(0,B.jsxs)(r.Flex,{gap:2,children:[(0,B.jsx)(r.Text,{children:"This is your new space"}),n>0?(0,B.jsx)(D,{nodesCount:n,onConnectClick:t}):(0,B.jsx)(Q,{nodesCount:n,onConnectClick:t})]})})),k=e=>{let{onInvite:n=s()}=e;const t=(0,f.xd)("theme"),o=(0,h.vt)(),a=(0,m.ID)(),l=(0,m.J_)(o,p.mL),[u,,y,w]=(0,E.A)(),C=(0,b.A)(),[M,_]=(0,i.useState)([]);(0,g.A)({spaceId:o,id:a||l,pollingInterval:3e3,keepPolling:!0,onNodeIdsChange:e=>{var n;_((null===e||void 0===e||null===(n=e.nodeIds)||void 0===n?void 0:n.length)||0)}});const Q=(0,i.useCallback)((e=>{e.preventDefault(),y()}),[y]),D=(0,i.useMemo)((()=>M>0),[M]);return(0,i.useEffect)((()=>{D&&C()}),[D]),(0,B.jsxs)(B.Fragment,{children:[(0,B.jsx)(A.A,{height:"66px",width:"66px",color:I[t]}),(0,B.jsx)(r.H1,{strong:!1,children:"Welcome to Netdata!"}),(0,B.jsx)(x,{nodesCount:M,onConnectClick:Q}),(0,B.jsx)(c.A,{}),(0,B.jsxs)(r.Flex,{gap:4,children:[(0,B.jsx)(v,{onClick:n}),(0,B.jsx)(T,{icon:"rocket",flavour:D?"default":"borderless",onClick:C,label:D?"Launch space":"Node Required for Launch",disabled:!D,feature:"LaunchSpace"})]}),u&&(0,B.jsx)(d.A,{onClose:w})]})};var R=t(51510),S=t(41344),P=t(42790),F=t(37617);function Y(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function U(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Y(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Y(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const N=(0,R.default)(r.Box).attrs((e=>U({position:"absolute",top:"24px",left:"12px"},e))).withConfig({displayName:"settingsPageFlavour__AnchorContainer",componentId:"sc-1cb895j-0"})([""]),j=(z=()=>(0,B.jsxs)(B.Fragment,{children:[(0,B.jsx)(r.H1,{strong:!1,children:"Integrations Made Easy!"}),(0,B.jsx)(r.Text,{children:"Unleash the Power of Connected Systems"})]}),e=>{const[n,t]=(0,P.N9)("onboarding"),{path:o}=(0,F.A)();return(0,i.useEffect)((()=>()=>t("")),[]),n&&o?(0,B.jsxs)(B.Fragment,{children:[(0,B.jsx)(N,{children:(0,B.jsx)(u.A,{as:S.N_,to:o,children:(0,B.jsxs)(r.Flex,{alignItems:"center",gap:2,children:[(0,B.jsx)(r.Icon,{name:"arrow_left",color:"link"}),(0,B.jsx)(r.Text,{color:"link",children:"Back to node setup"})]})})}),(0,B.jsx)(z,U({},e))]}):(0,B.jsx)(z,U({},e))});var z,H=t(19844),O=t(22794);const
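/* Overview of the flavour configuration defined below (identifiers are minifier
   output; roles are inferred from usage in this chunk, not from original source):
   - L renders an integration's `alerts` markdown block in a full-width padded column.
   - V maps a category id to its icon name (deploy -> "rocket", notify -> "alarmFilled", ...).
   - X maps a section id to its renderer component ("alert-notifications" -> L).
   - W names the three page flavours (homePage, settingsPage, addNodesModal); Z is the
     homePage default.
   - $ wires each flavour to its header content component (k for homePage, j for
     settingsPage, none for addNodesModal).
   - ee carries each flavour's layout metrics: header height and padding, search input
     sizing, CNCF strip height, and category character width. */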
L=e=>{let{integration:n={}}=e;return(0,B.jsx)(r.Flex,{width:"100%",column:!0,gap:2,padding:[4,0,0,0],children:(0,B.jsx)(O.A,{children:n.alerts})})},G="deploy.operating-systems",J=34,q="data-collection",K="deploy-kubernetes",V={deploy:"rocket","data-collection":"collect",notify:"alarmFilled",export:"importExport",logs:"logs",auth:"auth"},X={deploy:H.A,"alert-notifications":L},W={homePage:"homePage",settingsPage:"settingsPage",addNodesModal:"addNodesModal"},Z=W.homePage,$={homePage:{headerContent:k},settingsPage:{headerContent:j},addNodesModal:{headerContent:null}},ee={homePage:{header:{height:284,wrapperProps:{padding:[4,0]}},search:{wrapperProps:{width:"500px",margin:[0]},inputProps:{containerStyles:{width:"100%"}}},cncf:{height:80},categoryCharacterWidth:9},settingsPage:{header:{height:210,wrapperProps:{padding:[4,0]}},search:{wrapperProps:{width:"500px",margin:[0]},inputProps:{containerStyles:{width:"100%"}}},cncf:{height:0},categoryCharacterWidth:9},addNodesModal:{header:{height:126,wrapperProps:{width:"100%",padding:[4]}},search:{wrapperProps:{width:"100%",margin:[0],flex:!0},inputProps:{containerStyles:{width:"500px"},size:"small"}},cncf:{height:16},categoryCharacterWidth:9}}},32465(e,n,t){"use strict";t.d(n,{A:()=>i});t(98992),t(72577);var o=t(61661);const i=()=>{const e=(0,o.j6)();return n=>e.find((e=>e.id==n))}},92862(e,n,t){"use strict";t.d(n,{A:()=>s});var o=t(96540),i=t(41344),a=t(24609);const s=()=>{const e=(0,i.Zp)(),n=(0,a.bq)();return(0,o.useCallback)((()=>{e("/spaces/".concat(n))}),[n,e])}},61661(e,n,t){"use strict";t.d(n,{j6:()=>S,x9:()=>R,hh:()=>k,XL:()=>L,b8:()=>F,FF:()=>N,oE:()=>z,GT:()=>j,AR:()=>U,yv:()=>O,q2:()=>H,WB:()=>Y,Ss:()=>P});var o=t(80045),i=t(64467),a=(t(98992),t(54520),t(72577),t(3949),t(81454),t(62953),t(96540)),s=t(34843),r=t(30569),A=t(42790),c=t(52035),l=t(84929);const d=[{id:"deploy",name:"Deploy",description:"",children:[{id:"deploy.operating-systems",name:"Operating Systems",description:"",children:[]},{id:"deploy.docker-kubernetes",name:"Docker & Kubernetes",description:"",children:[]},{id:"deploy.provisioning-systems",name:"Provisioning Systems",description:"",children:[]}]},{id:"data-collection",name:"Data Collection",description:"",children:[{id:"data-collection.databases",name:"Databases",description:"",children:[]},{id:"data-collection.web-servers-and-proxies",name:"Web Servers and Proxies",description:"",children:[]},{id:"data-collection.containers-and-vms",name:"Containers and VMs",description:"",children:[]},{id:"data-collection.operating-systems",name:"Operating Systems",description:"",children:[]},{id:"data-collection.networking",name:"Networking",description:"",children:[]},{id:"data-collection.cloud-and-devops",name:"Cloud and DevOps",description:"",children:[]},{id:"data-collection.hardware-and-sensors",name:"Hardware and Sensors",description:"",children:[]},{id:"data-collection.applications",name:"Applications",description:"",collector_default:!0,children:[]},{id:"data-collection.storage",name:"Storage and Filesystems",description:"",children:[]},{id:"data-collection.synthetic-testing",name:"Synthetic Testing",description:"",children:[]}]},{id:"logs",name:"Logs",description:"Monitoring logs on your infrastructure",children:[]},{id:"export",name:"exporters",description:"Exporter Integrations",children:[]},{id:"notify",name:"notifications",description:"Notification Integrations",children:[{id:"notify.agent",name:"Agent Dispatched Notifications",description:"",children:[]},{id:"notify.cloud",name:"Centralized Cloud 
Notifications",description:"",children:[]}]},{id:"auth",name:"authentication",description:"Authentication & Authorization",children:[]}],u=[{meta:{plugin_name:"apps.plugin",module_name:"apps",monitored_instance:{name:"Applications",link:"",categories:["data-collection.operating-systems"],icon_filename:"applications.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["applications","processes","os","host monitoring"]},overview:"# Applications\n\nPlugin: apps.plugin\nModule: apps\n\n## Overview\n\nMonitor Applications for optimal software performance and resource usage.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per applications group\n\nThese metrics refer to the application group.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.cpu_utilization | user, system | percentage |\n| app.cpu_guest_utilization | guest | percentage |\n| app.cpu_context_switches | voluntary, involuntary | switches/s |\n| app.estimated_mem_usage | mem | MiB |\n| app.mem_usage | rss | MiB |\n| app.mem_private_usage | mem | MiB |\n| app.vmem_usage | vmem | MiB |\n| app.mem_page_faults | minor, major | pgfaults/s |\n| app.swap_usage | swap | MiB |\n| app.disk_physical_io | reads, writes | KiB/s |\n| app.disk_logical_io | reads, writes | KiB/s |\n| app.processes | processes | processes |\n| app.threads | threads | threads |\n| app.fds_open_limit | limit | percentage |\n| app.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |\n| app.uptime | uptime | seconds |\n| app.uptime_summary | min, avg, max | seconds |\n\n",integration_type:"collector",id:"apps.plugin-apps-Applications",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"apps.plugin",module_name:"groups",monitored_instance:{name:"User Groups",link:"",categories:["data-collection.operating-systems"],icon_filename:"user.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["groups","processes","user auditing","authorization","os","host monitoring"]},overview:"# User Groups\n\nPlugin: apps.plugin\nModule: groups\n\n## Overview\n\nThis integration monitors resource utilization on a user groups context.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per user group\n\nThese metrics refer to the user group.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| user_group | The name of the user group. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| usergroup.cpu_utilization | user, system | percentage |\n| usergroup.cpu_guest_utilization | guest | percentage |\n| usergroup.cpu_context_switches | voluntary, involuntary | switches/s |\n| usergroup.estimated_mem_usage | mem | MiB |\n| usergroup.mem_usage | rss | MiB |\n| usergroup.mem_private_usage | mem | MiB |\n| usergroup.vmem_usage | vmem | MiB |\n| usergroup.mem_page_faults | minor, major | pgfaults/s |\n| usergroup.swap_usage | swap | MiB |\n| usergroup.disk_physical_io | reads, writes | KiB/s |\n| usergroup.disk_logical_io | reads, writes | KiB/s |\n| usergroup.processes | processes | processes |\n| usergroup.threads | threads | threads |\n| usergroup.fds_open_limit | limit | percentage |\n| usergroup.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |\n| usergroup.uptime | uptime | seconds |\n| usergroup.uptime_summary | min, avg, max | seconds |\n\n",integration_type:"collector",id:"apps.plugin-groups-User_Groups",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"apps.plugin",module_name:"users",monitored_instance:{name:"Users",link:"",categories:["data-collection.operating-systems"],icon_filename:"users.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["users","processes","os","host monitoring"]},overview:"# Users\n\nPlugin: apps.plugin\nModule: users\n\n## Overview\n\nThis integration monitors resource utilization on a user context.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per user\n\nThese metrics refer to the user.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| user | The name of the user. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| user.cpu_utilization | user, system | percentage |\n| user.cpu_guest_utilization | guest | percentage |\n| user.cpu_context_switches | voluntary, involuntary | switches/s |\n| user.estimated_mem_usage | mem | MiB |\n| user.mem_usage | rss | MiB |\n| user.mem_private_usage | mem | MiB |\n| user.vmem_usage | vmem | MiB |\n| user.mem_page_faults | minor, major | pgfaults/s |\n| user.swap_usage | swap | MiB |\n| user.disk_physical_io | reads, writes | KiB/s |\n| user.disk_logical_io | reads, writes | KiB/s |\n| user.processes | processes | processes |\n| user.threads | threads | threads |\n| user.fds_open_limit | limit | percentage |\n| user.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |\n| user.uptime | uptime | seconds |\n| user.uptime_summary | min, avg, max | seconds |\n\n",integration_type:"collector",id:"apps.plugin-users-Users",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"AWS ECS Containers",link:"https://aws.amazon.com/ecs/",icon_filename:"aws.svg",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["aws","ecs","elastic container service","amazon","containers"]},overview:"# AWS ECS Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor AWS ECS container resource utilization \u2014 CPU, memory, disk I/O, and network \u2014 via Linux cgroups.\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, 
compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-AWS_ECS_Containers",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"Containers",link:"",categories:["data-collection.containers-and-vms"],icon_filename:"container.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["containers","docker","podman","containerd","cri-o","lxc","lxd","incus","nomad","ecs","systemd-nspawn","nspawn","cgroups","linux containers"]},overview:"# Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor containers and virtual machines resource utilization \u2014 CPU, memory, disk I/O, and network \u2014 via Linux cgroups.\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-Containers",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"Docker Containers",link:"https://www.docker.com/",icon_filename:"docker.svg",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["docker","containers","docker compose","docker swarm","moby","containerd","runc"]},overview:"# Docker Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Docker container resource utilization \u2014 CPU, memory, disk I/O, and network \u2014 via Linux cgroups.\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-Docker_Containers",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"Kubernetes Containers",link:"https://kubernetes.io/",icon_filename:"kubernetes.svg",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"k8s_state"},{plugin_name:"go.d.plugin",module_name:"k8s_apiserver"},{plugin_name:"go.d.plugin",module_name:"k8s_kubelet"},{plugin_name:"go.d.plugin",module_name:"k8s_kubeproxy"},{plugin_name:"go.d.plugin",module_name:"coredns"}]}},info_provided_to_referring_integrations:{description:""},keywords:["k8s","kubernetes","pods","containers","openshift","rancher","rke","rke2","k3s","microk8s","eks","gke","aks","tanzu","minikube","kind","containerd","cri-o","kubelet","kubepods"]},overview:'# Kubernetes Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor containers and virtual machines resource utilization \u2014 CPU, memory, disk I/O, and network \u2014 via Linux cgroups.\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nKubernetes Containers can be monitored further using the following other integrations:\n\n- {% relatedResource id="go.d.plugin-k8s_state-Kubernetes_Cluster_State" %}Kubernetes Cluster State{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-k8s_apiserver-Kubernetes_API_Server" %}Kubernetes API Server{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-k8s_kubelet-Kubelet" %}Kubelet{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-k8s_kubeproxy-Kubeproxy" %}Kubeproxy{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-coredns-CoreDNS" %}CoreDNS{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn\'t support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ k8s_cgroup_10min_cpu_usage 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ k8s_cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.mem_usage | cgroup memory utilization |\n| [ k8s_cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ k8s_cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per k8s cgroup\n\nThese metrics refer to the Pod container.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| k8s_node_name | Node name. The value of _pod.spec.nodeName_. |\n| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_. |\n| k8s_controller_name | Controller name. The value of _pod.OwnerReferences.Controller.Name_. |\n| k8s_pod_name | Pod name. The value of _pod.metadata.name_. |\n| k8s_container_name | Container name. The value of _pod.spec.containers.name_. |\n| k8s_kind | Instance kind: "pod" or "container". |\n| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). |\n| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s.cgroup.cpu_limit | used | percentage |\n| k8s.cgroup.cpu | user, system | percentage |\n| k8s.cgroup.cpu_per_core | a dimension per core | percentage |\n| k8s.cgroup.throttled | throttled | percentage |\n| k8s.cgroup.throttled_duration | duration | ms |\n| k8s.cgroup.cpu_shares | shares | shares |\n| k8s.cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| k8s.cgroup.writeback | dirty, writeback | MiB |\n| k8s.cgroup.mem_activity | in, out | MiB/s |\n| k8s.cgroup.pgfaults | pgfault, swap | MiB/s |\n| k8s.cgroup.mem_usage | ram, swap | MiB |\n| k8s.cgroup.mem_usage_limit | available, used | MiB |\n| k8s.cgroup.mem_utilization | utilization | percentage |\n| k8s.cgroup.mem_failcnt | failures | count |\n| k8s.cgroup.io | read, write | KiB/s |\n| k8s.cgroup.serviced_ops | read, write | operations/s |\n| k8s.cgroup.throttle_io | read, write | KiB/s |\n| k8s.cgroup.throttle_serviced_ops | read, write | operations/s |\n| k8s.cgroup.queued_ops | read, write | operations |\n| k8s.cgroup.merged_ops | read, write | operations/s |\n| k8s.cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.cpu_some_pressure_stall_time | time | ms |\n| k8s.cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.cpu_full_pressure_stall_time | time | ms |\n| k8s.cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.memory_some_pressure_stall_time | time | ms |\n| k8s.cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.memory_full_pressure_stall_time | time | ms |\n| k8s.cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.io_some_pressure_stall_time | time | ms |\n| k8s.cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.io_full_pressure_stall_time | time | ms |\n| k8s.cgroup.pids_current | pids | pids |\n\n### Per k8s cgroup network device\n\nThese metrics refer to the Pod container network interface.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. |\n| k8s_node_name | Node name. The value of _pod.spec.nodeName_. |\n| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_. |\n| k8s_controller_name | Controller name. The value of _pod.OwnerReferences.Controller.Name_. |\n| k8s_pod_name | Pod name. The value of _pod.metadata.name_. |\n| k8s_container_name | Container name. The value of _pod.spec.containers.name_. |\n| k8s_kind | Instance kind: "pod" or "container". |\n| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). |\n| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s.cgroup.net_net | received, sent | kilobits/s |\n| k8s.cgroup.net_packets | received, sent, multicast | pps |\n| k8s.cgroup.net_errors | inbound, outbound | errors/s |\n| k8s.cgroup.net_drops | inbound, outbound | errors/s |\n| k8s.cgroup.net_fifo | receive, transmit | errors/s |\n| k8s.cgroup.net_compressed | receive, sent | pps |\n| k8s.cgroup.net_events | frames, collisions, carrier | events/s |\n| k8s.cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| k8s.cgroup.net_carrier | up, down | state |\n| k8s.cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-Kubernetes_Containers",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"LXC Containers",link:"",icon_filename:"lxc.png",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["lxc","lxd","incus","linux containers","system containers","container"]},overview:"# LXC Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor LXC/LXD/Incus container resource utilization \u2014 CPU, memory, disk I/O, and network \u2014 via Linux cgroups.\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-LXC_Containers",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"Libvirt VMs and Containers",link:"",icon_filename:"libvirt.png",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["libvirt","kvm","qemu","virsh","virt-manager","virtual machine","vm","container"]},overview:"# Libvirt VMs and Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor libvirt-managed VM and container resource utilization \u2014 CPU, memory, disk I/O, and network \u2014 via Linux cgroups.\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-Libvirt_VMs_and_Containers",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"Nomad Containers",link:"https://www.nomadproject.io/",icon_filename:"nomad.png",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["nomad","hashicorp","containers","orchestrator"]},overview:"# Nomad Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor HashiCorp Nomad container resource utilization \u2014 CPU, memory, disk I/O, and network \u2014 via Linux cgroups.\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-Nomad_Containers",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"OpenShift Containers",link:"https://www.redhat.com/en/technologies/cloud-computing/openshift",icon_filename:"openshift.png",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["openshift","red hat openshift","okd","kubernetes","k8s","containers","pods"]},overview:"# OpenShift Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Red Hat OpenShift container resource utilization \u2014 CPU, memory, disk I/O, and network \u2014 via Linux cgroups.\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ k8s_cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ k8s_cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.mem_usage | cgroup memory utilization |\n| [ k8s_cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ k8s_cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric 
belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per k8s cgroup\n\nThese metrics refer to the Pod container.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| k8s_node_name | Node name. The value of _pod.spec.nodeName_. |\n| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_. |\n| k8s_controller_name | Controller name. The value of _pod.OwnerReferences.Controller.Name_. |\n| k8s_pod_name | Pod name. The value of _pod.metadata.name_. |\n| k8s_container_name | Container name. The value of _pod.spec.containers.name_. |\n| k8s_kind | Instance kind: "pod" or "container". |\n| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). |\n| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s.cgroup.cpu_limit | used | percentage |\n| k8s.cgroup.cpu | user, system | percentage |\n| k8s.cgroup.cpu_per_core | a dimension per core | percentage |\n| k8s.cgroup.throttled | throttled | percentage |\n| k8s.cgroup.throttled_duration | duration | ms |\n| k8s.cgroup.cpu_shares | shares | shares |\n| k8s.cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| k8s.cgroup.writeback | dirty, writeback | MiB |\n| k8s.cgroup.mem_activity | in, out | MiB/s |\n| k8s.cgroup.pgfaults | pgfault, swap | MiB/s |\n| k8s.cgroup.mem_usage | ram, swap | MiB |\n| k8s.cgroup.mem_usage_limit | available, used | MiB |\n| k8s.cgroup.mem_utilization | utilization | percentage |\n| k8s.cgroup.mem_failcnt | failures | count |\n| k8s.cgroup.io | read, write | KiB/s |\n| k8s.cgroup.serviced_ops | read, write | operations/s |\n| k8s.cgroup.throttle_io | read, write | KiB/s |\n| k8s.cgroup.throttle_serviced_ops | read, write | operations/s |\n| k8s.cgroup.queued_ops | read, write | operations |\n| k8s.cgroup.merged_ops | read, write | operations/s |\n| k8s.cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.cpu_some_pressure_stall_time | time | ms |\n| k8s.cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.cpu_full_pressure_stall_time | time | ms |\n| k8s.cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.memory_some_pressure_stall_time | time | ms |\n| k8s.cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.memory_full_pressure_stall_time | time | ms |\n| k8s.cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.io_some_pressure_stall_time | time | ms |\n| k8s.cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.io_full_pressure_stall_time | time | ms |\n| k8s.cgroup.pids_current | pids | pids |\n\n### Per k8s cgroup network device\n\nThese metrics refer to the Pod container network interface.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. |\n| k8s_node_name | Node name. The value of _pod.spec.nodeName_. |\n| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. 
|\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_. |\n| k8s_controller_name | Controller name. The value of _pod.OwnerReferences.Controller.Name_. |\n| k8s_pod_name | Pod name. The value of _pod.metadata.name_. |\n| k8s_container_name | Container name. The value of _pod.spec.containers.name_. |\n| k8s_kind | Instance kind: "pod" or "container". |\n| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). |\n| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s.cgroup.net_net | received, sent | kilobits/s |\n| k8s.cgroup.net_packets | received, sent, multicast | pps |\n| k8s.cgroup.net_errors | inbound, outbound | errors/s |\n| k8s.cgroup.net_drops | inbound, outbound | errors/s |\n| k8s.cgroup.net_fifo | receive, transmit | errors/s |\n| k8s.cgroup.net_compressed | receive, sent | pps |\n| k8s.cgroup.net_events | frames, collisions, carrier | events/s |\n| k8s.cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| k8s.cgroup.net_carrier | up, down | state |\n| k8s.cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-OpenShift_Containers",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"OpenStack VMs",link:"https://www.openstack.org/",icon_filename:"openstack.svg",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["openstack","openstack nova","openstack compute","kvm","qemu","libvirt","virtual machine","vm","cloud"]},overview:"# OpenStack VMs\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor OpenStack Nova virtual machine resource utilization \u2014 CPU, memory, disk I/O, and network \u2014 via Linux cgroups.\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-OpenStack_VMs",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"Podman Containers",link:"https://podman.io/",icon_filename:"podman.png",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["podman","containers","pods","oci","crun","runc"]},overview:"# Podman Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Podman container resource utilization \u2014 CPU, memory, disk I/O, and network \u2014 via Linux cgroups.\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
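As a cross-check (a sketch, not part of the collector itself), the resolved `container_name` label normally matches the names Podman reports:\n\n```sh\n# list running containers by name; these usually match the container_name label below\npodman ps --format "{{.Names}}"\n```\n\n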
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-Podman_Containers",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"Proxmox VMs and Containers",link:"",icon_filename:"proxmox.png",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["proxmox","proxmox ve","pve","kvm","qemu","lxc","libvirt","virtual machine","vm","container"]},overview:"# Proxmox VMs and Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Proxmox VE virtual machine and container resource utilization \u2014 CPU, memory, disk I/O, and network \u2014 via Linux cgroups.\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
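The pressure (PSI) metrics listed below are read from per-cgroup files. A minimal sketch for cgroup v2 (the exact path under /sys/fs/cgroup depends on your setup):\n\n```sh\n# "some" and "full" CPU pressure averages for one cgroup\ncat /sys/fs/cgroup/system.slice/cpu.pressure\n```\n\n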
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-Proxmox_VMs_and_Containers",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"Systemd Services",link:"",icon_filename:"systemd.svg",categories:["data-collection.operating-systems"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["systemd","services","units","daemons","systemctl","cgroups"]},overview:"# Systemd Services\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor systemd service resource utilization \u2014 CPU, memory, and disk I/O \u2014 via Linux cgroups.\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
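For example (a sketch; requires systemd), the per-service cgroups this collector reads can be inspected with:\n\n```sh\n# print the systemd cgroup tree, one node per service\nsystemd-cgls --no-pager\n```\n\n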
An instance is uniquely identified by a set of labels.\n\n\n\n### Per systemd service\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| service_name | Service name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| systemd.service.cpu.utilization | user, system | percentage |\n| systemd.service.memory.usage | ram, swap | MiB |\n| systemd.service.memory.failcnt | fail | failures/s |\n| systemd.service.memory.ram.usage | rss, cache, mapped_file, rss_huge | MiB |\n| systemd.service.memory.writeback | writeback, dirty | MiB |\n| systemd.service.memory.paging.faults | minor, major | MiB/s |\n| systemd.service.memory.paging.io | in, out | MiB/s |\n| systemd.service.disk.io | read, write | KiB/s |\n| systemd.service.disk.iops | read, write | operations/s |\n| systemd.service.disk.throttle.io | read, write | KiB/s |\n| systemd.service.disk.throttle.iops | read, write | operations/s |\n| systemd.service.disk.queued_iops | read, write | operations/s |\n| systemd.service.disk.merged_iops | read, write | operations/s |\n| systemd.service.pids.current | pids | pids |\n\n",integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-Systemd_Services",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"Virtual Machines",link:"",icon_filename:"container.svg",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["vms","virtual machines","virtualization","hypervisor","kvm","qemu","libvirt","proxmox","proxmox ve","ovirt","openstack","openstack nova","cgroups"]},overview:"# Virtual Machines\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor virtual machine resource utilization \u2014 CPU, memory, disk I/O, and network \u2014 via Linux cgroups.\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the 
network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-Virtual_Machines",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"containerd Containers",link:"https://containerd.io/",icon_filename:"containerd.png",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["containerd","containers","cri","container runtime","nerdctl"]},overview:"# containerd Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor containerd container resource utilization \u2014 CPU, memory, disk I/O, and network \u2014 via Linux cgroups.\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
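As a cross-check (a sketch; assumes the default containerd namespace and may require root), the containers that become instances here can be listed with:\n\n```sh\n# list containers known to containerd\nctr containers list\n```\n\n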
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-containerd_Containers",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"oVirt VMs",link:"",icon_filename:"ovirt.svg",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ovirt","rhev","red hat virtualization","red hat enterprise virtualization","kvm","qemu","libvirt","virtual machine","vm"]},overview:"# oVirt VMs\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor oVirt/RHEV virtual machine resource utilization \u2014 CPU, memory, disk I/O, and network \u2014 via Linux cgroups.\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
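Since oVirt VMs are libvirt/QEMU domains, instance names can be cross-checked (a sketch; requires the libvirt client tools):\n\n```sh\n# list running libvirt domains on this host\nvirsh list\n```\n\n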
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-oVirt_VMs",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance:{name:"systemd-nspawn Containers",link:"https://www.freedesktop.org/software/systemd/man/systemd-nspawn.html",icon_filename:"systemd.svg",categories:["data-collection.containers-and-vms"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["systemd-nspawn","nspawn","machinectl","systemd","containers"]},overview:"# systemd-nspawn Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor systemd-nspawn container resource utilization \u2014 CPU, memory, disk I/O, and network \u2014 via Linux cgroups.\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
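As a cross-check (a sketch; requires systemd-machined), running nspawn machines can be listed with:\n\n```sh\n# show machines registered with systemd-machined\nmachinectl list\n```\n\n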
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container\'s network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always "virtual" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n',integration_type:"collector",id:"cgroups.plugin-/sys/fs/cgroup-systemd-nspawn_Containers",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"charts.d.plugin",module_name:"libreswan",monitored_instance:{name:"Libreswan",link:"https://libreswan.org/",categories:["data-collection.networking"],icon_filename:"libreswan.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["vpn","libreswan","network","ipsec"]},overview:"# Libreswan\n\nPlugin: charts.d.plugin\nModule: libreswan\n\n## Overview\n\nMonitor Libreswan performance for optimal IPsec VPN operations. Improve your VPN operations with Netdata's real-time metrics and built-in alerts.\n\nThe collector uses the `ipsec` command to collect the information it needs.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Permissions to execute `ipsec`\n\nThe plugin executes two commands to collect all the information it needs:\n\n```sh\nipsec whack --status\nipsec whack --trafficstatus\n```\n\nThe first command is used to extract the currently established tunnels, their IDs and their names.\nThe second command is used to extract the current uptime and traffic.\n\nMost likely, the `netdata` user will not be able to query Libreswan, so the `ipsec` commands will be denied.\nThe plugin attempts to run `ipsec` as `sudo ipsec ...`, to get access to Libreswan statistics.\n\nTo allow the `netdata` user to execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content:\n\n```\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus\n```\n\nMake sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path).\n\n\n\n### Configuration\n\n#### Options\n\nThe config file is sourced by the charts.d plugin. 
It\'s a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the libreswan collector.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| libreswan_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| libreswan_priority | The charts priority on the dashboard | 90000 | no |\n| libreswan_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n| libreswan_sudo | Whether to run `ipsec` with `sudo` or not. | 1 | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `charts.d/libreswan.conf`.\n\nThe file format is POSIX shell script. Generally, the structure is:\n\n```sh\nOPTION_1="some value"\nOPTION_2="some other value"\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/libreswan.conf\n```\n\n##### Examples\n\n###### Run `ipsec` without sudo\n\nRun the `ipsec` utility without sudo\n\n```yaml\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#libreswan_update_every=1\n\n# the charts priority on the dashboard\n#libreswan_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#libreswan_retries=10\n\n# set to 1, to run ipsec with sudo (the default)\n# set to 0, to run ipsec without sudo\nlibreswan_sudo=0\n\n```\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `libreswan` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n  ```bash\n  ./charts.d.plugin debug 1 libreswan\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `libreswan` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep libreswan\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep libreswan /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep libreswan\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPSEC tunnel\n\nMetrics related to IPSEC tunnels. Each tunnel provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| libreswan.net | in, out | kilobits/s |\n| libreswan.uptime | uptime | seconds |\n\n",integration_type:"collector",id:"charts.d.plugin-libreswan-Libreswan",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/libreswan/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"charts.d.plugin",module_name:"opensips",monitored_instance:{name:"OpenSIPS",link:"https://opensips.org/",categories:["data-collection.applications"],icon_filename:"opensips.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["opensips","sip","voice","video","stream"]},overview:"# OpenSIPS\n\nPlugin: charts.d.plugin\nModule: opensips\n\n## Overview\n\nExamine OpenSIPS metrics for insights into SIP server operations. Study call rates, error rates, and response times for reliable voice over IP services.\n\nThe collector uses the `opensipsctl` command line utility to gather OpenSIPS metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will attempt to call `opensipsctl` with a default set of parameters, even without any configuration.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Required software\n\nThe collector requires the `opensipsctl` command line utility to be installed.\n
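\nYou can confirm that `opensipsctl` responds with the default parameters used by the collector (run it as a user allowed to access the OpenSIPS FIFO, and use the full path if it is not in `$PATH`):\n\n```sh\nopensipsctl fifo get_statistics all\n```\n\n\n### Configuration\n\n#### Options\n\nThe config file is sourced by the charts.d plugin.\n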
It\'s a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the opensips collector.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| opensips_opts | Specify parameters to the `opensipsctl` command. If the default value fails to get global status, set here whatever options are needed to connect to the opensips server. | fifo get_statistics all | no |\n| opensips_cmd | If `opensipsctl` is not in $PATH, specify it\'s full path here. |  | no |\n| opensips_timeout | How long to wait for `opensipsctl` to respond. | 2 | no |\n| opensips_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 5 | no |\n| opensips_priority | The charts priority on the dashboard. | 80000 | no |\n| opensips_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `charts.d/opensips.conf`.\n\nThe file format is POSIX shell script. Generally, the structure is:\n\n```sh\nOPTION_1="some value"\nOPTION_2="some other value"\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/opensips.conf\n```\n\n##### Examples\n\n###### Custom `opensipsctl` command\n\nSet a custom path to the `opensipsctl` command\n\n```yaml\n#opensips_opts="fifo get_statistics all"\nopensips_cmd=/opt/opensips/bin/opensipsctl\n#opensips_timeout=2\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#opensips_update_every=5\n\n# the charts priority on the dashboard\n#opensips_priority=80000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#opensips_retries=10\n\n```\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `opensips` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n  ```bash\n  ./charts.d.plugin debug 1 opensips\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `opensips` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep opensips\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep opensips /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep opensips\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenSIPS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| opensips.dialogs_active | active, early | dialogs |\n| opensips.users | registered, location, contacts, expires | users |\n| opensips.registrar | accepted, rejected | registrations/s |\n| opensips.transactions | UAS, UAC | transactions/s |\n| opensips.core_rcv | requests, replies | queries/s |\n| opensips.core_fwd | requests, replies | queries/s |\n| opensips.core_drop | requests, replies | queries/s |\n| opensips.core_err | requests, replies | queries/s |\n| opensips.core_bad | bad_URIs_rcvd, unsupported_methods, bad_msg_hdr | queries/s |\n| opensips.tm_replies | received, relayed, local | replies/s |\n| opensips.transactions_status | 2xx, 3xx, 4xx, 5xx, 6xx | transactions/s |\n| opensips.transactions_inuse | inuse | transactions |\n| opensips.sl_replies | 1xx, 2xx, 3xx, 4xx, 5xx, 6xx, sent, error, ACKed | replies/s |\n| opensips.dialogs | processed, expire, failed | dialogs/s |\n| opensips.net_waiting | UDP, TCP | kilobytes |\n| opensips.uri_checks | positive, negative | checks / sec |\n| opensips.traces | requests, replies | traces / sec |\n| opensips.shmem | total, used, real_used, max_used, free | kilobytes |\n| opensips.shmem_fragment | fragments | fragments |\n\n",integration_type:"collector",id:"charts.d.plugin-opensips-OpenSIPS",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/opensips/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"cups.plugin",module_name:"cups.plugin",monitored_instance:{name:"CUPS",link:"https://www.cups.org/",categories:["data-collection.applications"],icon_filename:"cups.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# CUPS\n\nPlugin: cups.plugin\nModule: cups.plugin\n\n## Overview\n\nMonitor CUPS performance for achieving optimal printing system operations. 
Monitor job statuses, queue lengths, and error rates to ensure smooth printing tasks.\n\nThe plugin uses CUPS shared library to connect and monitor the server.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs to access the server. Netdata sets permissions during installation time to reach the server through its library.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin detects when CUPS server is running and tries to connect to it.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Minimum setup\n\nThe CUPS server must be installed and running. If you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-cups`.\n\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector |  | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:cups]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CUPS instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cups.dests_state | idle, printing, stopped | dests |\n| cups.dests_option | total, acceptingjobs, shared | dests |\n| cups.job_num | pending, held, processing | jobs |\n| cups.job_size | pending, held, processing | KB |\n\n### Per destination\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cups.destination_job_num | pending, held, processing | jobs |\n| cups.destination_job_size | pending, held, processing | KB |\n\n",integration_type:"collector",id:"cups.plugin-cups.plugin-CUPS",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/cups.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"debugfs.plugin",module_name:"/sys/kernel/debug/extfrag",monitored_instance:{name:"System Memory Fragmentation",link:"https://www.kernel.org/doc/html/next/admin-guide/sysctl/vm.html",categories:["data-collection.operating-systems"],icon_filename:"microchip.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["extfrag","extfrag_threshold","memory fragmentation"]},overview:"# System Memory Fragmentation\n\nPlugin: debugfs.plugin\nModule: /sys/kernel/debug/extfrag\n\n## Overview\n\nCollects memory fragmentation statistics from the Linux kernel.\n\nParses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/kernel/debug/extfrag`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically run by default.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it manually. It is also recommended to add an entry to your fstab(5) so that the filesystem is mounted automatically before Netdata starts; see the example below.\n
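\nA typical fstab(5) entry for this looks like:\n\n```\ndebugfs  /sys/kernel/debug  debugfs  defaults  0  0\n```\n\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. 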
| 1 | no |\n| command options | Additional parameters for the collector |  | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the overall memory fragmentation of the system.\n\n### Per node\n\nMemory fragmentation statistics for each NUMA node in the system.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| numa_node | The NUMA node the metrics are associated with. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.fragmentation_index_dma | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n| mem.fragmentation_index_dma32 | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n| mem.fragmentation_index_normal | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n\n",integration_type:"collector",id:"debugfs.plugin-/sys/kernel/debug/extfrag-System_Memory_Fragmentation",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"debugfs.plugin",module_name:"/sys/kernel/debug/zswap",monitored_instance:{name:"Linux ZSwap",link:"https://www.kernel.org/doc/html/latest/admin-guide/mm/zswap.html",categories:["data-collection.operating-systems"],icon_filename:"microchip.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["swap","zswap","frontswap","swap cache"]},overview:"# Linux ZSwap\n\nPlugin: debugfs.plugin\nModule: /sys/kernel/debug/zswap\n\n## Overview\n\nCollects zswap performance metrics on Linux systems.\n\n\nParses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/kernel/debug/zswap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks.\n
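If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root. You can check whether the capability was applied with `getcap` (the path below is the usual plugins.d location mentioned in the troubleshooting sections; adjust it to your installation):\n\n```sh\ngetcap /usr/libexec/netdata/plugins.d/debugfs.plugin\n```\n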
\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether the system is using zswap.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it manually. It is also recommended to add an entry to your fstab(5) so that the filesystem is mounted automatically before Netdata starts.\n\n\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector |  | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to.\n
An instance is uniquely identified by a set of labels.\n\nMonitor the performance statistics of zswap.\n\n### Per Linux ZSwap instance\n\nGlobal zswap performance metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.zswap_pool_compression_ratio | compression_ratio | ratio |\n| system.zswap_pool_compressed_size | compressed_size | bytes |\n| system.zswap_pool_raw_size | uncompressed_size | bytes |\n| system.zswap_rejections | compress_poor, kmemcache_fail, alloc_fail, reclaim_fail | rejections/s |\n| system.zswap_pool_limit_hit | limit | events/s |\n| system.zswap_written_back_raw_bytes | written_back | bytes/s |\n| system.zswap_same_filled_raw_size | same_filled | bytes |\n| system.zswap_duplicate_entry | duplicate | entries/s |\n\n",integration_type:"collector",id:"debugfs.plugin-/sys/kernel/debug/zswap-Linux_ZSwap",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"debugfs.plugin",module_name:"intel_rapl",monitored_instance:{name:"Power Capping",link:"https://www.kernel.org/doc/html/next/power/powercap/powercap.html",categories:["data-collection.hardware-and-sensors"],icon_filename:"powersupply.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["power capping","energy"]},overview:"# Power Capping\n\nPlugin: debugfs.plugin\nModule: intel_rapl\n\n## Overview\n\nCollects power capping performance metrics on Linux systems.\n\n\nParses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/devices/virtual/powercap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether the system exposes Intel RAPL power capping information.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it manually. It is also recommended to add an entry to your fstab(5) so that the filesystem is mounted automatically before Netdata starts.\n
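\nSince this module reads the Intel RAPL zones from `/sys/devices/virtual/powercap`, you can also quickly verify that your system exposes them:\n\n```sh\nls /sys/devices/virtual/powercap/\n```\n\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. 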
| 1 | no |\n| command options | Additional parameters for the collector |  | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the consumption of the Intel RAPL zones.\n\n### Per Power Capping instance\n\nGlobal Intel RAPL zones.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.powercap_intel_rapl_zone | Power | Watts |\n| cpu.powercap_intel_rapl_subzones | dram, core, uncore | Watts |\n\n",integration_type:"collector",id:"debugfs.plugin-intel_rapl-Power_Capping",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"diskspace.plugin",module_name:"diskspace.plugin",monitored_instance:{name:"Disk space",link:"",categories:["data-collection.storage"],icon_filename:"hard-drive.svg"},related_resources:{integrations:{list:[{plugin_name:"ebpf.plugin",module_name:"disk"}]}},info_provided_to_referring_integrations:{description:""},keywords:["disk","I/O","space","inode"]},overview:'# Disk space\n\nPlugin: diskspace.plugin\nModule: diskspace.plugin\n\n## Overview\n\nMonitor Disk space metrics for proficient storage management. Keep track of usage, free space, and error rates to prevent disk space issues.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nDisk space can be monitored further using the following other integrations:\n\n- {% relatedResource id="ebpf.plugin-disk-eBPF_Disk" %}eBPF Disk{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin reads data from `/proc/self/mountinfo` and `/proc/diskstats`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nYou can also configure each mount point separately, using a `[plugin:proc:diskspace:mountpoint]` section; see the sketch below.\n
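\nFor example, to stop monitoring a specific mount point, you could add a section like the following (the per-mount-point option names are illustrative; verify them against the plugin documentation for your Netdata version):\n\n```ini\n[plugin:proc:diskspace:/run]\n    space usage = no\n    inodes usage = no\n```\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. 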
| 1 | no |\n| remove charts of unmounted disks | Remove chart when a device is unmounted on host. | yes | no |\n| check for new mount points every | Parse proc files frequency. | 15s | no |\n| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |\n| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |\n| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |\n| space usage for all disks | Define if plugin will show metrics for space usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n| inodes usage for all disks | Define if plugin will show metrics for inode usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| mount_point | Path used to mount a filesystem |\n| filesystem | The filesystem used to format a partition. |\n| mount_root | Root directory where mount points are present. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n",integration_type:"collector",id:"diskspace.plugin-diskspace.plugin-Disk_space",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/diskspace.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"cachestat",monitored_instance:{name:"eBPF Cachestat",link:"https://kernel.org/",categories:["data-collection.operating-systems"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Containers"}]}},info_provided_to_referring_integrations:{description:""},keywords:["Page cache","Hit ratio","eBPF"]},overview:'# eBPF Cachestat\n\nPlugin: ebpf.plugin\nModule: cachestat\n\n## Overview\n\nMonitor Linux page cache events, giving users a general view of how the kernel is manipulating files.\n\nAttaches tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\neBPF Cachestat can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Containers" %}Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread adds overhead every time an internal kernel function monitored by it is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n',setup:'## Setup\n\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or in the `/boot/config` file; see the check below. The exact option names can differ according to the preferences of Linux distributions.\n
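\nAs a quick check (file locations vary by distribution):\n\n```sh\nzcat /proc/config.gz 2>/dev/null | grep -E "CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=" || grep -E "CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=" /boot/config-$(uname -r)\n```\n\nIf these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files with different names.\n\nThen follow these steps:\n1. Copy the configuration file to `/usr/src/linux/.config`.\n2. Select the necessary options: `make oldconfig`\n3. Compile your kernel image: `make bzImage`\n4. Compile your modules: `make modules`\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: `make modules_install`\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. 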
Update your boot loader\n\n\n\n### Configuration\n\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load). | auto | no |\n| ebpf co-re tracing | Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code). | trampoline | no |\n| maps per core | Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information. | yes | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `ebpf.d/cachestat.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/cachestat.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Cachestat instance\n\nThese metrics show total number of calls to functions inside kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.cachestat_ratio | ratio | % |\n| mem.cachestat_dirties | dirty | page/s |\n| mem.cachestat_hits | hit | hits/s |\n| mem.cachestat_misses | miss | misses/s |\n\n### Per apps\n\nThese Metrics show grouped information per apps group.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_cachestat_hit_ratio | ratio | % |\n| app.ebpf_cachestat_dirty_pages | pages | page/s |\n| app.ebpf_cachestat_access | hits | hits/s |\n| app.ebpf_cachestat_misses | misses | misses/s |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cachestat_ratio | ratio | % |\n| cgroup.cachestat_dirties | dirty | page/s |\n| cgroup.cachestat_hits | hit | hits/s |\n| cgroup.cachestat_misses | miss | misses/s |\n| services.cachestat_ratio | a dimension per systemd service | % |\n| services.cachestat_dirties | a dimension per systemd service | page/s |\n| services.cachestat_hits | a dimension per systemd service | hits/s |\n| services.cachestat_misses | a dimension per systemd service | misses/s |\n\n",integration_type:"collector",id:"ebpf.plugin-cachestat-eBPF_Cachestat",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"dcstat",monitored_instance:{name:"eBPF DCstat",link:"https://kernel.org/",categories:["data-collection.operating-systems"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Containers"}]}},info_provided_to_referring_integrations:{description:""},keywords:["Directory Cache","File system","eBPF"]},overview:'# eBPF DCstat\n\nPlugin: ebpf.plugin\nModule: dcstat\n\n## Overview\n\nMonitor directory cache events per application given an overall vision about files on memory or storage device.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time.\n\neBPF DCstat can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Containers" %}Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n',setup:'## Setup\n\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. 
The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image for boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details open=true summary="Config option" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load). | auto | no |\n| ebpf co-re tracing | Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code). | trampoline | no |\n| maps per core | Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information. | yes | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `ebpf.d/dcstat.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/dcstat.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per apps\n\nThese Metrics show grouped information per apps group.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_dc_ratio | ratio | % |\n| app.ebpf_dc_reference | files | files |\n| app.ebpf_dc_not_cache | files | files |\n| app.ebpf_dc_not_found | files | files |\n\n### Per filesystem\n\nThese metrics show total number of calls to functions inside kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.dc_reference | reference, slow, miss | files |\n| filesystem.dc_hit_ratio | ratio | % |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.dc_ratio | ratio | % |\n| cgroup.dc_reference | reference | files |\n| cgroup.dc_not_cache | slow | files |\n| cgroup.dc_not_found | miss | files |\n| services.dc_ratio | a dimension per systemd service | % |\n| services.dc_reference | a dimension per systemd service | files |\n| services.dc_not_cache | a dimension per systemd service | files |\n| services.dc_not_found | a dimension per systemd service | files |\n\n",integration_type:"collector",id:"ebpf.plugin-dcstat-eBPF_DCstat",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"disk",monitored_instance:{name:"eBPF Disk",link:"https://kernel.org/",categories:["data-collection.storage"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["hard Disk","eBPF","latency","partition"]},overview:"# eBPF Disk\n\nPlugin: ebpf.plugin\nModule: disk\n\n## Overview\n\nMeasure latency for I/O events on disk.\n\nAttach tracepoints to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image for boot loader directory\n6. 
Install the new modules: `make modules_install`\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Defines whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |\n| lifetime | Sets the default lifetime for the thread when enabled by the cloud. | 300 | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `ebpf.d/disk.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/disk.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\nThese metrics measure latency for I/O events on every hard disk present on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.latency_io | latency | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-disk-eBPF_Disk",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"filedescriptor",monitored_instance:{name:"eBPF Filedescriptor",link:"https://kernel.org/",categories:["data-collection.operating-systems"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Containers"}]}},info_provided_to_referring_integrations:{description:""},keywords:["file","eBPF","fd","open","close"]},overview:'# eBPF Filedescriptor\n\nPlugin: ebpf.plugin\nModule: filedescriptor\n\n## Overview\n\nMonitor calls to the functions responsible for opening or closing a file descriptor, and possible errors.\n\nAttaches tracing (kprobe and trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel.\n
Netdata sets necessary permissions during installation time.\n\neBPF Filedescriptor can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Containers" %}Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nDepending of kernel version and frequency that files are open and close, this thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n',setup:'## Setup\n\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image for boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load). | auto | no |\n| ebpf co-re tracing | Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code). | trampoline | no |\n| maps per core | Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information. | yes | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. 
\n\n\n#### via File\n\nThe configuration file name for this integration is `ebpf.d/fd.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/fd.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.fd_open | open | calls/s |\n| cgroup.fd_open_error | open | calls/s |\n| cgroup.fd_closed | close | calls/s |\n| cgroup.fd_close_error | close | calls/s |\n| services.file_open | a dimension per systemd service | calls/s |\n| services.file_open_error | a dimension per systemd service | calls/s |\n| services.file_closed | a dimension per systemd service | calls/s |\n| services.file_close_error | a dimension per systemd service | calls/s |\n\n### Per eBPF Filedescriptor instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.file_descriptor | open, close | calls/s |\n| filesystem.file_error | open, close | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |
\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_file_open | calls | calls/s |\n| app.ebpf_file_open_error | calls | calls/s |\n| app.ebpf_file_closed | calls | calls/s |\n| app.ebpf_file_close_error | calls | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-filedescriptor-eBPF_Filedescriptor",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"filesystem",monitored_instance:{name:"eBPF Filesystem",link:"https://kernel.org/",categories:["data-collection.storage"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["Filesystem","ext4","btrfs","nfs","xfs","zfs","eBPF","latency","I/O"]},overview:"# eBPF Filesystem\n\nPlugin: ebpf.plugin\nModule: filesystem\n\n## Overview\n\nMonitor latency for the main filesystem actions, like I/O events.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions at installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or in the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: `make oldconfig`\n3. Compile your kernel image: `make bzImage`\n4. Compile your modules: `make modules`\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: `make modules_install`\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites default options, while `[filesystem]` allows the user to select the filesystems to monitor.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by Netdata Cloud. | 300 | no |\n| btrfsdist | Enable or disable latency monitoring for functions associated with the btrfs filesystem. | yes | no |\n| ext4dist | Enable or disable latency monitoring for functions associated with the ext4 filesystem. | yes | no |\n| nfsdist | Enable or disable latency monitoring for functions associated with the nfs filesystem. | yes | no |\n| xfsdist | Enable or disable latency monitoring for functions associated with the xfs filesystem. | yes | no |\n| zfsdist | Enable or disable latency monitoring for functions associated with the zfs filesystem. | yes | no |\n\n\n{% /details %}
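\n\nAs an illustrative sketch (section and option names come from the table above; the values are only an example), disabling the monitors for filesystems that are not present on the host could look like this in `ebpf.d/filesystem.conf`:\n\n```ini\n[global]\n    update every = 5\n\n[filesystem]\n    btrfsdist = no\n    nfsdist = no\n    zfsdist = no\n```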
\n\n\n#### via File\n\nThe configuration file name for this integration is `ebpf.d/filesystem.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/filesystem.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per filesystem\n\nLatency charts associated with filesystem actions.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.read_latency | latency period | calls/s |\n| filesystem.open_latency | latency period | calls/s |\n| filesystem.sync_latency | latency period | calls/s |\n\n### Per filesystem\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.write_latency | latency period | calls/s |\n\n### Per eBPF Filesystem instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.attributte_latency | latency period | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-filesystem-eBPF_Filesystem",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"hardirq",monitored_instance:{name:"eBPF Hardirq",link:"https://kernel.org/",categories:["data-collection.operating-systems"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["HardIRQ","eBPF"]},overview:"# eBPF Hardirq\n\nPlugin: ebpf.plugin\nModule: hardirq\n\n## Overview\n\nMonitor latency for each HardIRQ available.\n\nAttach tracepoints to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel.
 Netdata sets the necessary permissions at installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or in the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: `make oldconfig`\n3. Compile your kernel image: `make bzImage`\n4. Compile your modules: `make modules`\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: `make modules_install`\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by Netdata Cloud. | 300 | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `ebpf.d/hardirq.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/hardirq.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to.
 An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Hardirq instance\n\nThese metrics show the latest timestamp for each hardIRQ available on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.hardirq_latency | hardirq names | milliseconds |\n\n",integration_type:"collector",id:"ebpf.plugin-hardirq-eBPF_Hardirq",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"mdflush",monitored_instance:{name:"eBPF MDflush",link:"https://kernel.org/",categories:["data-collection.storage"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["MD","RAID","eBPF"]},overview:"# eBPF MDflush\n\nPlugin: ebpf.plugin\nModule: mdflush\n\n## Overview\n\nMonitor when flush events happen between disks.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions at installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that `md_flush_request` is called. The estimated additional period of time is between 90 and 200 ms per call on kernels that do not have BTF technology.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or in the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: `make oldconfig`\n3. Compile your kernel image: `make bzImage`\n4. Compile your modules: `make modules`\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: `make modules_install`\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by Netdata Cloud. | 300 | no |\n\n\n{% /details %}
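\n\nFor illustration (a sketch, not a recommendation; the option name comes from the table above), switching the thread so that it also monitors the return of the functions, at the cost of extra overhead, would look like this:\n\n```ini\n[global]\n    ebpf load mode = return\n```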
\n\n\n#### via File\n\nThe configuration file name for this integration is `ebpf.d/mdflush.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mdflush.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF MDflush instance\n\nNumber of times `md_flush_request` was called since the previous data collection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mdstat.mdstat_flush | disk | flushes |\n\n",integration_type:"collector",id:"ebpf.plugin-mdflush-eBPF_MDflush",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"mount",monitored_instance:{name:"eBPF Mount",link:"https://kernel.org/",categories:["data-collection.storage"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["mount","umount","device","eBPF"]},overview:"# eBPF Mount\n\nPlugin: ebpf.plugin\nModule: mount\n\n## Overview\n\nMonitor calls to the mount and umount syscalls.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions at installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90 and 200 ms per call on kernels that do not have BTF technology.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or in the /boot/config file.
 Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: `make oldconfig`\n3. Compile your kernel image: `make bzImage`\n4. Compile your modules: `make modules`\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: `make modules_install`\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`). | entry | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| lifetime | Set the default lifetime for the thread when enabled by Netdata Cloud. | 300 | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `ebpf.d/mount.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mount.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to.
 An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Mount instance\n\nCalls to the mount and umount syscalls.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mount_points.call | mount, umount | calls/s |\n| mount_points.error | mount, umount | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-mount-eBPF_Mount",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"oomkill",monitored_instance:{name:"eBPF OOMkill",link:"https://kernel.org/",categories:["data-collection.operating-systems"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Containers"}]}},info_provided_to_referring_integrations:{description:""},keywords:["application","memory"]},overview:'# eBPF OOMkill\n\nPlugin: ebpf.plugin\nModule: oomkill\n\n## Overview\n\nMonitor applications that run out of memory.\n\nAttach tracepoints to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions at installation time.\n\neBPF OOMkill can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Containers" %}Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called.\n',setup:"## Setup\n\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or in the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: `make oldconfig`\n3. Compile your kernel image: `make bzImage`\n4. Compile your modules: `make modules`\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: `make modules_install`\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
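\n\nA small sketch (standard Linux commands, not specific to Netdata) that mounts `debugfs` only when it is not already mounted:\n\n```bash\n# Mount debugfs if it is not mounted yet (requires root)\nmountpoint -q /sys/kernel/debug || mount -t debugfs none /sys/kernel/debug\n```\n\nTo make the mount persistent across reboots, you can also add a line such as `debugfs /sys/kernel/debug debugfs defaults 0 0` to `/etc/fstab`.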
\n\n\n\n### Configuration\n\n#### Options\n\nOverwrite the default configuration to reduce the number of I/O events.\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `ebpf.d/oomkill.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/oomkill.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"## Troubleshooting\n\n### update every\n\nData collection frequency.\n\n### ebpf load mode\n\nDefine whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`).\n\n### lifetime\n\nSet the default lifetime for the thread when enabled by Netdata Cloud.\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show the cgroups/services that reached OOM.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.oomkills | cgroup name | kills |\n| services.oomkills | a dimension per systemd service | kills |\n\n### Per apps\n\nThese metrics show the apps groups that reached OOM.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |
\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.oomkill | kills | kills |\n\n",integration_type:"collector",id:"ebpf.plugin-oomkill-eBPF_OOMkill",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"process",monitored_instance:{name:"eBPF Process",link:"https://github.com/netdata/netdata/",categories:["data-collection.operating-systems"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["Memory","plugin","eBPF"]},overview:"# eBPF Process\n\nPlugin: ebpf.plugin\nModule: process\n\n## Overview\n\nMonitor internal memory usage.\n\nUses Netdata internal statistics to monitor memory management by the plugin.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\n#### Netdata flags\n\nTo have these charts, you need to compile Netdata with the flag `NETDATA_DEV_MODE`.\n\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to.
 An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Process instance\n\nHow the plugin allocates memory.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netdata.ebpf_aral_stat_size | memory | bytes |\n| netdata.ebpf_aral_stat_alloc | aral | calls |\n| netdata.ebpf_threads | total, running | threads |\n| netdata.ebpf_pids | user, kernel | pids |\n| netdata.ebpf_load_methods | legacy, co-re | methods |\n| netdata.ebpf_kernel_memory | memory_locked | bytes |\n| netdata.ebpf_hash_tables_count | hash_table | hash tables |\n| netdata.ebpf_hash_tables_insert_pid_elements | thread | rows |\n| netdata.ebpf_hash_tables_remove_pid_elements | thread | rows |\n| netdata.ebpf_ipc_usage | positions | % |\n\n",integration_type:"collector",id:"ebpf.plugin-process-eBPF_Process",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"processes",monitored_instance:{name:"eBPF Processes",link:"https://kernel.org/",categories:["data-collection.operating-systems"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Containers"}]}},info_provided_to_referring_integrations:{description:""},keywords:["thread","fork","process","eBPF"]},overview:'# eBPF Processes\n\nPlugin: ebpf.plugin\nModule: processes\n\n## Overview\n\nMonitor calls to the functions that create tasks (threads and processes) inside the Linux kernel.\n\nAttach tracing (kprobe or tracepoint, and trampoline) to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions at installation time.\n\neBPF Processes can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Containers" %}Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called.\n',setup:'## Setup\n\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or in the /boot/config file; a quick way to verify them is shown right after the steps below. Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: `make oldconfig`\n3. Compile your kernel image: `make bzImage`\n4. Compile your modules: `make modules`\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: `make modules_install`\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader
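\n\nAs a quick check (an illustrative sketch using standard tools; file locations vary between distributions), you can verify the flags on the running kernel before recompiling anything:\n\n```bash\n# Look for the required flags in the running kernel configuration\nfor opt in CONFIG_KPROBES CONFIG_BPF CONFIG_BPF_SYSCALL CONFIG_BPF_JIT; do\n    if zgrep -q "^${opt}=y" /proc/config.gz 2>/dev/null || grep -q "^${opt}=y" /boot/config-$(uname -r) 2>/dev/null; then\n        echo "${opt} is set"\n    else\n        echo "${opt} is missing"\n    fi\ndone\n```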
\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroups.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). This plugin will always try to attach a tracepoint, so the option here impacts only the function used to monitor task (thread and process) creation. | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping centralized information. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by Netdata Cloud. | 300 | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `ebpf.d/process.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/process.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to.
 An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Processes instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.process_thread | process | calls/s |\n| system.process_status | process, zombie | difference |\n| system.exit | process | calls/s |\n| system.task_error | task | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.process_create | calls | calls/s |\n| app.thread_create | call | calls/s |\n| app.task_exit | call | calls/s |\n| app.task_close | call | calls/s |\n| app.task_error | app | calls/s |\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.process_create | process | calls/s |\n| cgroup.thread_create | thread | calls/s |\n| cgroup.task_exit | exit | calls/s |\n| cgroup.task_close | process | calls/s |\n| cgroup.task_error | process | calls/s |\n| services.process_create | a dimension per systemd service | calls/s |\n| services.thread_create | a dimension per systemd service | calls/s |\n| services.task_close | a dimension per systemd service | calls/s |\n| services.task_exit | a dimension per systemd service | calls/s |\n| services.task_error | a dimension per systemd service | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-processes-eBPF_Processes",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"shm",monitored_instance:{name:"eBPF SHM",link:"https://kernel.org/",categories:["data-collection.operating-systems"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Containers"}]}},info_provided_to_referring_integrations:{description:""},keywords:["syscall","shared memory","eBPF"]},overview:'# eBPF SHM\n\nPlugin: ebpf.plugin\nModule: shm\n\n## Overview\n\nMonitor the syscalls responsible for manipulating shared memory.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel.
 Netdata sets the necessary permissions at installation time.\n\neBPF SHM can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Containers" %}Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90 and 200 ms per call on kernels that do not have BTF technology.\n',setup:'## Setup\n\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or in the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: `make oldconfig`\n3. Compile your kernel image: `make bzImage`\n4. Compile your modules: `make modules`\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: `make modules_install`\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select which syscalls to monitor.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroups.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping centralized information. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by Netdata Cloud. | 300 | no |\n| shmget | Enable or disable monitoring for the syscall `shmget` | yes | no |\n| shmat | Enable or disable monitoring for the syscall `shmat` | yes | no |\n| shmdt | Enable or disable monitoring for the syscall `shmdt` | yes | no |\n| shmctl | Enable or disable monitoring for the syscall `shmctl` | yes | no |\n\n\n{% /details %}
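\n\nAn illustrative sketch for `ebpf.d/shm.conf` (section and option names come from the description above; the values are only an example) that disables monitoring for two of the syscalls:\n\n```ini\n[global]\n    update every = 5\n\n[syscalls]\n    shmdt = no\n    shmctl = no\n```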
\n\n\n#### via File\n\nThe configuration file name for this integration is `ebpf.d/shm.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/shm.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.shmget | get | calls/s |\n| cgroup.shmat | at | calls/s |\n| cgroup.shmdt | dt | calls/s |\n| cgroup.shmctl | ctl | calls/s |\n| services.shmget | a dimension per systemd service | calls/s |\n| services.shmat | a dimension per systemd service | calls/s |\n| services.shmdt | a dimension per systemd service | calls/s |\n| services.shmctl | a dimension per systemd service | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |
\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_shmget_call | calls | calls/s |\n| app.ebpf_shmat_call | calls | calls/s |\n| app.ebpf_shmdt_call | calls | calls/s |\n| app.ebpf_shmctl_call | calls | calls/s |\n\n### Per eBPF SHM instance\n\nThese metrics show the number of calls for the specified syscalls.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.shared_memory_calls | get, at, dt, ctl | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-shm-eBPF_SHM",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"socket",monitored_instance:{name:"eBPF Socket",link:"https://kernel.org/",categories:["data-collection.networking"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Containers"}]}},info_provided_to_referring_integrations:{description:""},keywords:["TCP","UDP","bandwidth","server","connection","socket"]},overview:'# eBPF Socket\n\nPlugin: ebpf.plugin\nModule: socket\n\n## Overview\n\nMonitor bandwidth consumption per application for the TCP and UDP protocols.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions at installation time.\n\neBPF Socket can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Containers" %}Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90 and 200 ms per call on kernels that do not have BTF technology.\n',setup:'## Setup\n\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or in the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: `make oldconfig`\n3. Compile your kernel image: `make bzImage`\n4. Compile your modules: `make modules`\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: `make modules_install`\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### Options\n\nAll options are defined inside the `[global]` section. Options inside `network connections` are ignored for now.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroups.plugin | no | no |\n| bandwidth table size | Number of elements stored inside hash tables used to monitor calls per PID. | 16384 | no |\n| ipv4 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv4 connection. | 16384 | no |\n| ipv6 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv6 connection. | 16384 | no |\n| udp connection table size | Number of temporary elements stored inside hash tables used to monitor UDP connections. | 4096 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping centralized information. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by Netdata Cloud. | 300 | no |\n\n\n{% /details %}
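\n\nAn illustrative sketch for `ebpf.d/network.conf` (option names and defaults come from the table above; the larger table sizes are only an example for hosts handling many connections):\n\n```ini\n[global]\n    apps = yes\n    ipv4 connection table size = 32768\n    ipv6 connection table size = 32768\n```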
\n\n\n#### via File\n\nThe configuration file name for this integration is `ebpf.d/network.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/network.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to.
 An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Socket instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ip.inbound_conn | connection_tcp | connections/s |\n| ip.tcp_outbound_conn | received | connections/s |\n| ip.tcp_functions | received, send, closed | calls/s |\n| ip.total_tcp_bandwidth | received, send | kilobits/s |\n| ip.tcp_error | received, send | calls/s |\n| ip.tcp_retransmit | retransmited | calls/s |\n| ip.udp_functions | received, send | calls/s |\n| ip.total_udp_bandwidth | received, send | kilobits/s |\n| ip.udp_error | received, send | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_tcp_v4_connection | connections | connections/s |\n| app.ebpf_call_tcp_v6_connection | connections | connections/s |\n| app.ebpf_sock_total_bandwidth | received, sent | kilobits/s |\n| app.ebpf_call_tcp_sendmsg | calls | calls/s |\n| app.ebpf_call_tcp_cleanup_rbuf | calls | calls/s |\n| app.ebpf_call_tcp_retransmit | calls | calls/s |\n| app.ebpf_call_udp_sendmsg | calls | calls/s |\n| app.ebpf_call_udp_recvmsg | calls | calls/s |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_conn_ipv4 | connections | connections/s |\n| cgroup.net_conn_ipv6 | connections | connections/s |\n| cgroup.net_total_bandwidth | received, sent | kilobits/s |\n| cgroup.net_tcp_recv | calls | calls/s |\n| cgroup.net_tcp_send | calls | calls/s |\n| cgroup.net_retransmit | calls | calls/s |\n| cgroup.net_udp_send | calls | calls/s |\n| cgroup.net_udp_recv | calls | calls/s |\n| services.net_conn_ipv4 | connections | connections/s |\n| services.net_conn_ipv6 | connections | connections/s |\n| services.net_total_bandwidth | received, sent | kilobits/s |\n| services.net_tcp_recv | calls | calls/s |\n| services.net_tcp_send | calls | calls/s |\n| services.net_tcp_retransmit | calls | calls/s |\n| services.net_udp_send | calls | calls/s |\n| services.net_udp_recv | calls | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-socket-eBPF_Socket",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"softirq",monitored_instance:{name:"eBPF SoftIRQ",link:"https://kernel.org/",categories:["data-collection.operating-systems"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["SoftIRQ","eBPF"]},overview:"# eBPF SoftIRQ\n\nPlugin: ebpf.plugin\nModule: softirq\n\n## Overview\n\nMonitor latency for each SoftIRQ available.\n\nAttach kprobes to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel.
 Netdata sets the necessary permissions at installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or in the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: `make oldconfig`\n3. Compile your kernel image: `make bzImage`\n4. Compile your modules: `make modules`\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: `make modules_install`\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by Netdata Cloud. | 300 | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `ebpf.d/softirq.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/softirq.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to.
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF SoftIRQ instance\n\nThese metrics show the latency of each softIRQ available on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softirq_latency | soft IRQs | milliseconds |\n\n",integration_type:"collector",id:"ebpf.plugin-softirq-eBPF_SoftIRQ",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"swap",monitored_instance:{name:"eBPF SWAP",link:"https://kernel.org/",categories:["data-collection.operating-systems"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Containers"}]}},info_provided_to_referring_integrations:{description:""},keywords:["SWAP","memory","eBPF","Hard Disk"]},overview:'# eBPF SWAP\n\nPlugin: ebpf.plugin\nModule: swap\n\n## Overview\n\nMonitors swap I/O events and the applications that execute them.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside kernel. Netdata sets the necessary permissions at installation time.\n\neBPF SWAP can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Containers" %}Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n',setup:'## Setup\n\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, you need to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. 
Update your boot loader\n\n\n\n### Configuration\n\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also its return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type used to load the eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is set in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping centralized information. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `ebpf.d/swap.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/swap.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.swap_read | read | calls/s |\n| cgroup.swap_write | write | calls/s |\n| services.swap_read | a dimension per systemd service | calls/s |\n| services.swap_write | a dimension per systemd service | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_swap_readpage | a dimension per app group | calls/s |\n| app.ebpf_call_swap_writepage | a dimension per app group | calls/s |\n\n### Per eBPF SWAP instance\n\nThese metrics show total number of calls to functions inside kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapcalls | write, read | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-swap-eBPF_SWAP",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"sync",monitored_instance:{name:"eBPF Sync",link:"https://kernel.org/",categories:["data-collection.storage"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["syscall","eBPF","hard disk","memory"]},overview:"# eBPF Sync\n\nPlugin: ebpf.plugin\nModule: sync\n\n## Overview\n\nMonitor the syscalls responsible for moving data from memory to storage devices.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside kernel. Netdata sets the necessary permissions at installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, you need to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. 
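Whether `debugfs` is already in place can be checked up front; a small sketch, assuming the standard mount point (the mount command itself follows below):

```bash
# Report whether debugfs is already mounted at the standard location.
if mountpoint -q /sys/kernel/debug; then
    echo "debugfs already mounted"
else
    echo "debugfs not mounted - run the mount command shown below"
fi
```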
To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug`).\n\n\n\n### Configuration\n\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select which syscalls to monitor.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also its return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type used to load the eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is set in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping centralized information. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n| sync | Enable or disable monitoring for syscall `sync` | yes | no |\n| msync | Enable or disable monitoring for syscall `msync` | yes | no |\n| fsync | Enable or disable monitoring for syscall `fsync` | yes | no |\n| fdatasync | Enable or disable monitoring for syscall `fdatasync` | yes | no |\n| syncfs | Enable or disable monitoring for syscall `syncfs` | yes | no |\n| sync_file_range | Enable or disable monitoring for syscall `sync_file_range` | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `ebpf.d/sync.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/sync.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ sync_freq ](https://github.com/netdata/netdata/blob/master/src/health/health.d/synchronization.conf) | mem.sync | number of sync() system calls. Every call causes all pending modifications to filesystem metadata and cached file data to be written to the underlying filesystems. 
|\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Sync instance\n\nThese metrics show total number of calls to functions inside kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.file_sync | fsync, fdatasync | calls/s |\n| mem.memory_map | msync | calls/s |\n| mem.sync | sync, syncfs | calls/s |\n| mem.file_segment | sync_file_range | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-sync-eBPF_Sync",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ebpf.plugin",module_name:"vfs",monitored_instance:{name:"eBPF VFS",link:"https://kernel.org/",categories:["data-collection.storage"],icon_filename:"ebpf.jpg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Containers"}]}},info_provided_to_referring_integrations:{description:""},keywords:["virtual","filesystem","eBPF","I/O","files"]},overview:'# eBPF VFS\n\nPlugin: ebpf.plugin\nModule: vfs\n\n## Overview\n\nMonitor I/O events on Linux Virtual Filesystem.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time.\n\neBPF VFS can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Containers" %}Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n',setup:'## Setup\n\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.\nWhen you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image for boot loader directory\n6. 
Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the function call (`entry`) or also its return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type used to load the eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is set in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping centralized information. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `ebpf.d/vfs.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/vfs.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
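Putting the `[global]` options above together, a minimal `ebpf.d/vfs.conf` sketch that simply restates the documented defaults (the option names and values are taken from the table above; change only what you need):

```ini
[global]
    update every = 5
    ebpf load mode = entry
    apps = no
    cgroups = no
    pid table size = 32768
    ebpf type format = auto
    ebpf co-re tracing = trampoline
    maps per core = yes
    lifetime = 300
```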
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.vfs_unlink | delete | calls/s |\n| cgroup.vfs_write | write | calls/s |\n| cgroup.vfs_write_error | write | calls/s |\n| cgroup.vfs_read | read | calls/s |\n| cgroup.vfs_read_error | read | calls/s |\n| cgroup.vfs_write_bytes | write | bytes/s |\n| cgroup.vfs_read_bytes | read | bytes/s |\n| cgroup.vfs_fsync | fsync | calls/s |\n| cgroup.vfs_fsync_error | fsync | calls/s |\n| cgroup.vfs_open | open | calls/s |\n| cgroup.vfs_open_error | open | calls/s |\n| cgroup.vfs_create | create | calls/s |\n| cgroup.vfs_create_error | create | calls/s |\n| services.vfs_unlink | a dimension per systemd service | calls/s |\n| services.vfs_write | a dimension per systemd service | calls/s |\n| services.vfs_write_error | a dimension per systemd service | calls/s |\n| services.vfs_read | a dimension per systemd service | calls/s |\n| services.vfs_read_error | a dimension per systemd service | calls/s |\n| services.vfs_write_bytes | a dimension per systemd service | bytes/s |\n| services.vfs_read_bytes | a dimension per systemd service | bytes/s |\n| services.vfs_fsync | a dimension per systemd service | calls/s |\n| services.vfs_fsync_error | a dimension per systemd service | calls/s |\n| services.vfs_open | a dimension per systemd service | calls/s |\n| services.vfs_open_error | a dimension per systemd service | calls/s |\n| services.vfs_create | a dimension per systemd service | calls/s |\n| services.vfs_create_error | a dimension per systemd service | calls/s |\n\n### Per eBPF VFS instance\n\nThese metrics show the total number of VFS events on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.vfs_deleted_objects | delete | calls/s |\n| filesystem.vfs_io | read, write | calls/s |\n| filesystem.vfs_io_bytes | read, write | bytes/s |\n| filesystem.vfs_io_error | read, write | calls/s |\n| filesystem.vfs_fsync | fsync | calls/s |\n| filesystem.vfs_fsync_error | fsync | calls/s |\n| filesystem.vfs_open | open | calls/s |\n| filesystem.vfs_open_error | open | calls/s |\n| filesystem.vfs_create | create | calls/s |\n| filesystem.vfs_create_error | create | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_vfs_unlink | calls | calls/s |\n| app.ebpf_call_vfs_write | calls | calls/s |\n| app.ebpf_call_vfs_write_error | calls | calls/s |\n| app.ebpf_call_vfs_read | calls | calls/s |\n| app.ebpf_call_vfs_read_error | calls | calls/s |\n| app.ebpf_call_vfs_write_bytes | writes | bytes/s |\n| app.ebpf_call_vfs_read_bytes | reads | bytes/s |\n| app.ebpf_call_vfs_fsync | calls | calls/s |\n| app.ebpf_call_vfs_fsync_error | calls | calls/s |\n| app.ebpf_call_vfs_open | calls | calls/s |\n| app.ebpf_call_vfs_open_error | calls | calls/s |\n| app.ebpf_call_vfs_create | calls | calls/s |\n| app.ebpf_call_vfs_create_error | calls | calls/s |\n\n",integration_type:"collector",id:"ebpf.plugin-vfs-eBPF_VFS",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"dev.cpu.0.freq",monitored_instance:{name:"dev.cpu.0.freq",link:"https://www.freebsd.org/",categories:["data-collection.operating-systems"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# dev.cpu.0.freq\n\nPlugin: freebsd.plugin\nModule: dev.cpu.0.freq\n\n## Overview\n\nRead current CPU Scaling frequency.\n\nCurrent CPU Scaling Frequency\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| dev.cpu.0.freq | Enable or disable CPU Scaling frequency metric. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
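For reference, the resulting `netdata.conf` stanza described in the setup above is small; a sketch using the section and option named there (the value shown is the documented default):

```ini
[plugin:freebsd]
    dev.cpu.0.freq = yes
```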
An instance is uniquely identified by a set of labels.\n\n\n\n### Per dev.cpu.0.freq instance\n\nThis metric shows the current CPU frequency; it is directly affected by system load.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.scaling_cur_freq | frequency | MHz |\n\n",integration_type:"collector",id:"freebsd.plugin-dev.cpu.0.freq-dev.cpu.0.freq",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"dev.cpu.temperature",monitored_instance:{name:"dev.cpu.temperature",link:"https://www.freebsd.org/",categories:["data-collection.hardware-and-sensors"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# dev.cpu.temperature\n\nPlugin: freebsd.plugin\nModule: dev.cpu.temperature\n\n## Overview\n\nGet current CPU temperature\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| dev.cpu.temperature | Enable or disable CPU temperature metric. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
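Since this collector reads its value through `sysctl`, the raw reading can be spot-checked from a FreeBSD shell. A sketch; the `dev.cpu.0.temperature` OID is an assumption that holds only when a temperature driver such as coretemp(4) or amdtemp(4) is loaded:

```bash
# Query the temperature OID for the first CPU; the OID is absent if no driver is loaded.
sysctl dev.cpu.0.temperature
```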
An instance is uniquely identified by a set of labels.\n\n\n\n### Per dev.cpu.temperature instance\n\nThis metric shows the latest CPU temperature.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.temperature | a dimension per core | Celsius |\n\n",integration_type:"collector",id:"freebsd.plugin-dev.cpu.temperature-dev.cpu.temperature",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"devstat",monitored_instance:{name:"devstat",link:"https://www.freebsd.org/",categories:["data-collection.storage"],icon_filename:"hard-drive.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# devstat\n\nPlugin: freebsd.plugin\nModule: devstat\n\n## Overview\n\nCollect information per hard disk available on host.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enable new disks detected at runtime | Enable or disable the detection of new disks. | auto | no |\n| performance metrics for pass devices | Enable or disable metrics for disks with type `PASS`. | auto | no |\n| total bandwidth for all disks | Enable or disable total bandwidth metric for all disks. | yes | no |\n| bandwidth for all disks | Enable or disable bandwidth for all disks metric. | auto | no |\n| operations for all disks | Enable or disable operations for all disks metric. | auto | no |\n| queued operations for all disks | Enable or disable queued operations for all disks metric. | auto | no |\n| utilization percentage for all disks | Enable or disable utilization percentage for all disks metric. | auto | no |\n| i/o time for all disks | Enable or disable I/O time for all disks metric. | auto | no |\n| average completed i/o time for all disks | Enable or disable average completed I/O time for all disks metric. | auto | no |\n| average completed i/o bandwidth for all disks | Enable or disable average completed I/O bandwidth for all disks metric. | auto | no |\n| average service time for all disks | Enable or disable average service time for all disks metric. | auto | no |\n| disable by default disks matching | Do not create charts for disks listed. |  | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:kern.devstat]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per devstat instance\n\nThese metrics give a general overview of I/O events on disks.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | reads, writes, frees | KiB/s |\n| disk.ops | reads, writes, other, frees | operations/s |\n| disk.qops | operations | operations |\n| disk.util | utilization | % of time working |\n| disk.iotime | reads, writes, other, frees | milliseconds/s |\n| disk.await | reads, writes, other, frees | milliseconds/operation |\n| disk.avgsz | reads, writes, frees | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n\n",integration_type:"collector",id:"freebsd.plugin-devstat-devstat",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"getifaddrs",monitored_instance:{name:"getifaddrs",link:"https://www.freebsd.org/",categories:["data-collection.networking"],icon_filename:"network.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# getifaddrs\n\nPlugin: freebsd.plugin\nModule: getifaddrs\n\n## Overview\n\nCollect traffic per network interface.\n\nThe plugin calls `getifaddrs` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enable new interfaces detected at 
runtime | Enable or disable the discovery of new interfaces after the plugin starts. | auto | no |\n| total bandwidth for physical interfaces | Enable or disable total bandwidth for physical interfaces metric. | auto | no |\n| total packets for physical interfaces | Enable or disable total packets for physical interfaces metric. | auto | no |\n| total bandwidth for ipv4 interface | Enable or disable total bandwidth for IPv4 interface metric. | auto | no |\n| total bandwidth for ipv6 interfaces | Enable or disable total bandwidth for IPv6 interfaces metric. | auto | no |\n| bandwidth for all interfaces | Enable or disable bandwidth for all interfaces metric. | auto | no |\n| packets for all interfaces | Enable or disable packets for all interfaces metric. | auto | no |\n| errors for all interfaces | Enable or disable errors for all interfaces metric. | auto | no |\n| drops for all interfaces | Enable or disable drops for all interfaces metric. | auto | no |\n| collisions for all interface | Enable or disable collisions for all interfaces metric. | auto | no |\n| disable by default interfaces matching | Do not display data for interfaces listed. | lo* | no |\n| set physical interfaces for system.net | Define which interfaces are considered physical for the system.net chart. | igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx* re* igc* dwc* | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getifaddrs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n| [ inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, 
compared to the rate over the last minute |\n| [ interface_inbound_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.errors | number of inbound errors for the network interface ${label:device} in the last 10 minutes |\n| [ interface_outbound_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.errors | number of outbound errors for the network interface ${label:device} in the last 10 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per getifaddrs instance\n\nGeneral overview of network traffic.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.net | received, sent | kilobits/s |\n| system.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| system.ipv4 | received, sent | kilobits/s |\n| system.ipv6 | received, sent | kilobits/s |\n\n### Per network device\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound, outbound | drops/s |\n| net.events | collisions | events/s |\n\n",integration_type:"collector",id:"freebsd.plugin-getifaddrs-getifaddrs",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"getmntinfo",monitored_instance:{name:"getmntinfo",link:"https://www.freebsd.org/",categories:["data-collection.storage"],icon_filename:"hard-drive.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# getmntinfo\n\nPlugin: freebsd.plugin\nModule: getmntinfo\n\n## Overview\n\nCollect information per mount point.\n\nThe plugin calls `getmntinfo` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enable new mount points detected at runtime | Check for new mount points during runtime. | auto | no |\n| space usage for all disks | Enable or disable space usage for all disks metric. | auto | no |\n| inodes usage for all disks | Enable or disable inodes usage for all disks metric. | auto | no |\n| exclude space metrics on paths | Do not show metrics for listed paths. | /proc/* | no |\n| exclude space metrics on filesystems | Do not monitor listed filesystems. 
| autofs procfs subfs devfs none | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getmntinfo]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\nThese metrics show details about mount point usage.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n",integration_type:"collector",id:"freebsd.plugin-getmntinfo-getmntinfo",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"hw.intrcnt",monitored_instance:{name:"hw.intrcnt",link:"https://www.freebsd.org/",categories:["data-collection.operating-systems"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# hw.intrcnt\n\nPlugin: freebsd.plugin\nModule: hw.intrcnt\n\n## Overview\n\nGet total number of interrupts\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config option" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| hw.intrcnt | Enable or disable Interrupts metric. 
| yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per hw.intrcnt instance\n\nThese metrics show system interrupts frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.intr | interrupts | interrupts/s |\n| system.interrupts | a dimension per interrupt | interrupts/s |\n\n",integration_type:"collector",id:"freebsd.plugin-hw.intrcnt-hw.intrcnt",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"ipfw",monitored_instance:{name:"ipfw",link:"https://www.freebsd.org/",categories:["data-collection.networking"],icon_filename:"firewall.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# ipfw\n\nPlugin: freebsd.plugin\nModule: ipfw\n\n## Overview\n\nCollect information about FreeBSD firewall.\n\nThe plugin uses RAW socket to communicate with kernel and collect data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| counters for static rules | Enable or disable counters for static rules  metric. | yes | no |\n| number of dynamic rules | Enable or disable number of dynamic rules metric. | yes | no |\n| allocated memory | Enable or disable allocated memory metric. 
| yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:ipfw]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ipfw instance\n\nThese metrics show FreeBSD firewall statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipfw.mem | dynamic, static | bytes |\n| ipfw.packets | a dimension per static rule | packets/s |\n| ipfw.bytes | a dimension per static rule | bytes/s |\n| ipfw.active | a dimension per dynamic rule | rules |\n| ipfw.expired | a dimension per dynamic rule | rules |\n\n",integration_type:"collector",id:"freebsd.plugin-ipfw-ipfw",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"kern.cp_time",monitored_instance:{name:"kern.cp_time",link:"https://www.freebsd.org/",categories:["data-collection.operating-systems"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# kern.cp_time\n\nPlugin: freebsd.plugin\nModule: kern.cp_time\n\n## Overview\n\nTotal CPU utilization\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe netdata main configuration file.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| kern.cp_time | Enable or disable Total CPU usage. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |\n| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |\n| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding nice) |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.cp_time instance\n\nThese metrics show CPU usage statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | nice, system, user, interrupt, idle | percentage |\n\n### Per core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.cpu | nice, system, user, interrupt, idle | percentage |\n\n",integration_type:"collector",id:"freebsd.plugin-kern.cp_time-kern.cp_time",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"kern.ipc.msq",monitored_instance:{name:"kern.ipc.msq",link:"https://www.freebsd.org/",categories:["data-collection.operating-systems"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# kern.ipc.msq\n\nPlugin: freebsd.plugin\nModule: kern.ipc.msq\n\n## Overview\n\nCollect number of IPC message Queues\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | 
Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| kern.ipc.msq | Enable or disable IPC message queue metric. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.msq instance\n\nThese metrics show IPC message statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_msq_queues | queues | queues |\n| system.ipc_msq_messages | messages | messages |\n| system.ipc_msq_size | allocated, used | bytes |\n\n",integration_type:"collector",id:"freebsd.plugin-kern.ipc.msq-kern.ipc.msq",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"kern.ipc.sem",monitored_instance:{name:"kern.ipc.sem",link:"https://www.freebsd.org/",categories:["data-collection.operating-systems"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# kern.ipc.sem\n\nPlugin: freebsd.plugin\nModule: kern.ipc.sem\n\n## Overview\n\nCollect information about semaphores.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| kern.ipc.sem | Enable or disable semaphore metrics. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization |\n| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.sem instance\n\nThese metrics show counters for semaphores on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_semaphores | semaphores | semaphores |\n| system.ipc_semaphore_arrays | arrays | arrays |\n\n",integration_type:"collector",id:"freebsd.plugin-kern.ipc.sem-kern.ipc.sem",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"kern.ipc.shm",monitored_instance:{name:"kern.ipc.shm",link:"https://www.freebsd.org/",categories:["data-collection.operating-systems"],icon_filename:"memory.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# kern.ipc.shm\n\nPlugin: freebsd.plugin\nModule: kern.ipc.shm\n\n## Overview\n\nCollect shared memory information.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| kern.ipc.shm | Enable or disable shared memory metric. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
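As an illustrative sketch only (the value is an assumption, not taken from the shipped examples), turning the shared memory metric off would look like:\n\n```ini\n[plugin:freebsd]\n    kern.ipc.shm = no\n```\n\n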
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.shm instance\n\nThese metrics give status about current shared memory segments.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_shared_mem_segs | segments | segments |\n| system.ipc_shared_mem_size | allocated | KiB |\n\n",integration_type:"collector",id:"freebsd.plugin-kern.ipc.shm-kern.ipc.shm",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"net.inet.icmp.stats",monitored_instance:{name:"net.inet.icmp.stats",link:"https://www.freebsd.org/",categories:["data-collection.networking"],icon_filename:"network.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# net.inet.icmp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.icmp.stats\n\n## Overview\n\nCollect information about ICMP traffic.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| IPv4 ICMP packets | Enable or disable IPv4 ICMP packets metric. | yes | no |\n| IPv4 ICMP error | Enable or disable IPv4 ICMP error metric. | yes | no |\n| IPv4 ICMP messages | Enable or disable IPv4 ICMP messages metric. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.icmp.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
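A hedged sketch of a per-option override using the option names from the table above (the values are assumptions for illustration):\n\n```ini\n[plugin:freebsd:net.inet.icmp.stats]\n    IPv4 ICMP packets = yes\n    IPv4 ICMP error = no\n```\n\n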
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.icmp.stats instance\n\nThese metrics show ICMP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s |\n\n",integration_type:"collector",id:"freebsd.plugin-net.inet.icmp.stats-net.inet.icmp.stats",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"net.inet.ip.stats",monitored_instance:{name:"net.inet.ip.stats",link:"https://www.freebsd.org/",categories:["data-collection.networking"],icon_filename:"network.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# net.inet.ip.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.ip.stats\n\n## Overview\n\nCollect IP stats\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| ipv4 packets | Enable or disable IPv4 packets metric. | yes | no |\n| ipv4 fragments sent | Enable or disable IPv4 fragments sent metric. | yes | no |\n| ipv4 fragments assembly | Enable or disable IPv4 fragments assembly metric. | yes | no |\n| ipv4 errors | Enable or disable IPv4 errors metric. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.ip.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
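For illustration only (assumed values), keeping packet counters while muting error counters might be written as:\n\n```ini\n[plugin:freebsd:net.inet.ip.stats]\n    ipv4 packets = yes\n    ipv4 errors = no\n```\n\n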
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.ip.stats instance\n\nThese metrics show IPv4 connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s |\n\n",integration_type:"collector",id:"freebsd.plugin-net.inet.ip.stats-net.inet.ip.stats",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"net.inet.tcp.states",monitored_instance:{name:"net.inet.tcp.states",link:"https://www.freebsd.org/",categories:["data-collection.networking"],icon_filename:"network.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# net.inet.tcp.states\n\nPlugin: freebsd.plugin\nModule: net.inet.tcp.states\n\n## Overview\n\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| net.inet.tcp.states | Enable or disable TCP state metric. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
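A minimal sketch (assumed value, not a shipped example) of disabling the TCP state metric:\n\n```ini\n[plugin:freebsd]\n    net.inet.tcp.states = no\n```\n\n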
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_conn.conf) | ipv4.tcpsock | IPv4 TCP connections utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.tcp.states instance\n\nA counter for TCP connections.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.tcpsock | connections | active connections |\n\n",integration_type:"collector",id:"freebsd.plugin-net.inet.tcp.states-net.inet.tcp.states",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"net.inet.tcp.stats",monitored_instance:{name:"net.inet.tcp.stats",link:"https://www.freebsd.org/",categories:["data-collection.networking"],icon_filename:"network.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# net.inet.tcp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.tcp.stats\n\n## Overview\n\nCollect overall information about TCP connections.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| ipv4 TCP packets | Enable or disable ipv4 TCP packets metric. | yes | no |\n| ipv4 TCP errors | Enable or disable ipv4 TCP errors metric. | yes | no |\n| ipv4 TCP handshake issues | Enable or disable ipv4 TCP handshake issue metric. | yes | no |\n| TCP connection aborts | Enable or disable TCP connection aborts metric. | auto | no |\n| TCP out-of-order queue | Enable or disable TCP out-of-order queue metric. | auto | no |\n| TCP SYN cookies | Enable or disable TCP SYN cookies metric. | auto | no |\n| TCP listen issues | Enable or disable TCP listen issues metric. 
| auto | no |\n| ECN packets | Enable or disable ECN packets metric. | auto | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.tcp.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last minute |\n| [ 10s_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last minute |\n| [ 10s_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.tcp.stats instance\n\nThese metrics show TCP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.tcppackets | received, sent | packets/s |\n| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s |\n| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger | connections/s |\n| ipv4.tcpofo | inqueue | packets/s |\n| ipv4.tcpsyncookies | received, sent, failed | packets/s |\n| ipv4.tcplistenissues | overflows | packets/s |\n| ipv4.ecnpkts | InCEPkts, InECT0Pkts, InECT1Pkts, OutECT0Pkts, OutECT1Pkts | packets/s |\n\n",integration_type:"collector",id:"freebsd.plugin-net.inet.tcp.stats-net.inet.tcp.stats",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"net.inet.udp.stats",monitored_instance:{name:"net.inet.udp.stats",link:"https://www.freebsd.org/",categories:["data-collection.networking"],icon_filename:"network.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# net.inet.udp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.udp.stats\n\n## Overview\n\nCollect information about UDP connections.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| ipv4 UDP packets | Enable or disable ipv4 UDP packets metric. | yes | no |\n| ipv4 UDP errors | Enable or disable ipv4 UDP errors metric. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.udp.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
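As a sketch under the option names listed above (the values are assumptions for illustration):\n\n```ini\n[plugin:freebsd:net.inet.udp.stats]\n    ipv4 UDP packets = yes\n    ipv4 UDP errors = no\n```\n\n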
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute |\n| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.udp.stats instance\n\nThese metrics show UDP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | InErrors, NoPorts, RcvbufErrors, InCsumErrors, IgnoredMulti | events/s |\n\n",integration_type:"collector",id:"freebsd.plugin-net.inet.udp.stats-net.inet.udp.stats",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"net.inet6.icmp6.stats",monitored_instance:{name:"net.inet6.icmp6.stats",link:"https://www.freebsd.org/",categories:["data-collection.networking"],icon_filename:"network.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# net.inet6.icmp6.stats\n\nPlugin: freebsd.plugin\nModule: net.inet6.icmp6.stats\n\n## Overview\n\nCollect information about IPv6 ICMP.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| icmp | Enable or disable ICMP metric. | auto | no |\n| icmp redirects | Enable or disable ICMP redirects metric. | auto | no |\n| icmp errors | Enable or disable ICMP errors metric. 
| auto | no |\n| icmp echos | Enable or disable ICMP echos metric. | auto | no |\n| icmp router | Enable or disable ICMP router metric. | auto | no |\n| icmp neighbor | Enable or disable ICMP neighbor metric. | auto | no |\n| icmp types | Enable or disable ICMP types metric. | auto | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.icmp6.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet6.icmp6.stats instance\n\nCollect IPv6 ICMP traffic statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n\n",integration_type:"collector",id:"freebsd.plugin-net.inet6.icmp6.stats-net.inet6.icmp6.stats",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"net.inet6.ip6.stats",monitored_instance:{name:"net.inet6.ip6.stats",link:"https://www.freebsd.org/",categories:["data-collection.networking"],icon_filename:"network.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# net.inet6.ip6.stats\n\nPlugin: freebsd.plugin\nModule: net.inet6.ip6.stats\n\n## Overview\n\nCollect information about IPv6 stats.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data 
collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| ipv6 packets | Enable or disable ipv6 packet metric. | auto | no |\n| ipv6 fragments sent | Enable or disable ipv6 fragments sent metric. | auto | no |\n| ipv6 fragments assembly | Enable or disable ipv6 fragments assembly metric. | auto | no |\n| ipv6 errors | Enable or disable ipv6 errors metric. | auto | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.ip6.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet6.ip6.stats instance\n\nThese metrics show general information about IPv6 connections.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n\n",integration_type:"collector",id:"freebsd.plugin-net.inet6.ip6.stats-net.inet6.ip6.stats",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"net.isr",monitored_instance:{name:"net.isr",link:"https://www.freebsd.org/",categories:["data-collection.networking"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# net.isr\n\nPlugin: freebsd.plugin\nModule: net.isr\n\n## Overview\n\nCollect information about system softnet stat.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| netisr | Enable or disable general vision about softnet stat metrics. | yes | no |\n| netisr per core | Enable or disable softnet stat metric per core. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.isr]` section within that file.\n\nThe file format is a modified INI syntax. 
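For illustration (assumed values), enabling the aggregate view while disabling the per-core charts could look like:\n\n```ini\n[plugin:freebsd:net.isr]\n    netisr = yes\n    netisr per core = no\n```\n\n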
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to exceeded net.core.netdev_max_backlog |\n| [ 1min_netdev_budget_ran_outs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) |\n| [ 10min_netisr_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of drops in the last minute due to exceeded sysctl net.route.netisr_maxqlen (this can be a cause for dropped packets) |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.isr instance\n\nThese metrics show statistics about softnet stats.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s |\n\n### Per core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s |\n\n",integration_type:"collector",id:"freebsd.plugin-net.isr-net.isr",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"system.ram",monitored_instance:{name:"system.ram",link:"https://www.freebsd.org/",categories:["data-collection.operating-systems"],icon_filename:"memory.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# system.ram\n\nPlugin: freebsd.plugin\nModule: system.ram\n\n## Overview\n\nShow information about system memory usage.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| system.ram | Enable or disable system RAM metric. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
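A minimal illustrative override (the value is an assumption, not a shipped example) that disables the RAM chart:\n\n```ini\n[plugin:freebsd]\n    system.ram = no\n```\n\n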
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | system.ram | system memory utilization |\n| [ ram_available ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per system.ram instance\n\nThis metric shows RAM usage statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ram | free, active, inactive, wired, cache, laundry, buffers | MiB |\n| mem.available | avail | MiB |\n\n",integration_type:"collector",id:"freebsd.plugin-system.ram-system.ram",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"uptime",monitored_instance:{name:"uptime",link:"https://www.freebsd.org/",categories:["data-collection.operating-systems"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# uptime\n\nPlugin: freebsd.plugin\nModule: uptime\n\n## Overview\n\nShow how long the server has been up.\n\nThe plugin calls `clock_gettime` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| vm.loadavg | Enable or disable load 
average metric. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per uptime instance\n\nHow long the system is running.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.uptime | uptime | seconds |\n\n",integration_type:"collector",id:"freebsd.plugin-uptime-uptime",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"vm.loadavg",monitored_instance:{name:"vm.loadavg",link:"https://www.freebsd.org/",categories:["data-collection.operating-systems"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# vm.loadavg\n\nPlugin: freebsd.plugin\nModule: vm.loadavg\n\n## Overview\n\nSystem Load Average\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| vm.loadavg | Enable or disable load average metric. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
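For example, a sketch (assumed value, for illustration only) that disables the load average chart:\n\n```ini\n[plugin:freebsd]\n    vm.loadavg = no\n```\n\n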
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | number of active CPU cores in the system |\n| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system fifteen-minute load average |\n| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system five-minute load average |\n| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system one-minute load average |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.loadavg instance\n\nMonitoring for number of threads running or waiting.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.load | load1, load5, load15 | load |\n\n",integration_type:"collector",id:"freebsd.plugin-vm.loadavg-vm.loadavg",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"vm.stats.sys.v_intr",monitored_instance:{name:"vm.stats.sys.v_intr",link:"https://www.freebsd.org/",categories:["data-collection.operating-systems"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# vm.stats.sys.v_intr\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_intr\n\n## Overview\n\nDevice interrupts\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config option" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| vm.stats.sys.v_intr | Enable or disable device interrupts metric. 
| yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_intr instance\n\nThis metric shows the device interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.dev_intr | interrupts | interrupts/s |\n\n",integration_type:"collector",id:"freebsd.plugin-vm.stats.sys.v_intr-vm.stats.sys.v_intr",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"vm.stats.sys.v_soft",monitored_instance:{name:"vm.stats.sys.v_soft",link:"https://www.freebsd.org/",categories:["data-collection.operating-systems"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# vm.stats.sys.v_soft\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_soft\n\n## Overview\n\nSoftware Interrupt\n\nvm.stats.sys.v_soft\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config option" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| vm.stats.sys.v_soft | Enable or disable software interrupts metric. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
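An illustrative sketch (assumed value) of explicitly disabling this metric:\n\n```ini\n[plugin:freebsd]\n    vm.stats.sys.v_soft = no\n```\n\n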
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_soft instance\n\nThis metric shows software interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.soft_intr | interrupts | interrupts/s |\n\n",integration_type:"collector",id:"freebsd.plugin-vm.stats.sys.v_soft-vm.stats.sys.v_soft",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"vm.stats.sys.v_swtch",monitored_instance:{name:"vm.stats.sys.v_swtch",link:"https://www.freebsd.org/",categories:["data-collection.operating-systems"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# vm.stats.sys.v_swtch\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_swtch\n\n## Overview\n\nCPU context switch\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| vm.stats.sys.v_swtch | Enable or disable CPU context switch metric. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
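As a sketch only (the value is an assumption for illustration):\n\n```ini\n[plugin:freebsd]\n    vm.stats.sys.v_swtch = no\n```\n\n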
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_swtch instance\n\nThis metric counts the number of context switches happening on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ctxt | switches | context switches/s |\n| system.forks | started | processes/s |\n\n",integration_type:"collector",id:"freebsd.plugin-vm.stats.sys.v_swtch-vm.stats.sys.v_swtch",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"vm.stats.vm.v_pgfaults",monitored_instance:{name:"vm.stats.vm.v_pgfaults",link:"https://www.freebsd.org/",categories:["data-collection.operating-systems"],icon_filename:"memory.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# vm.stats.vm.v_pgfaults\n\nPlugin: freebsd.plugin\nModule: vm.stats.vm.v_pgfaults\n\n## Overview\n\nCollect memory page fault events.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| vm.stats.vm.v_pgfaults | Enable or disable memory page fault metric. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
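A hedged example (assumed value, not a shipped default) of an explicit override:\n\n```ini\n[plugin:freebsd]\n    vm.stats.vm.v_pgfaults = no\n```\n\n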
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.vm.v_pgfaults instance\n\nThis metric shows the number of page faults that happened on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pgfaults | memory, io_requiring, cow, cow_optimized, in_transit | page faults/s |\n\n",integration_type:"collector",id:"freebsd.plugin-vm.stats.vm.v_pgfaults-vm.stats.vm.v_pgfaults",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"vm.stats.vm.v_swappgs",monitored_instance:{name:"vm.stats.vm.v_swappgs",link:"https://www.freebsd.org/",categories:["data-collection.operating-systems"],icon_filename:"memory.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# vm.stats.vm.v_swappgs\n\nPlugin: freebsd.plugin\nModule: vm.stats.vm.v_swappgs\n\n## Overview\n\nThis metric shows the amount of data read from and written to SWAP.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| vm.stats.vm.v_swappgs | Enable or disable information about the SWAP I/O metric. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
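To inspect the swap pager counters behind this chart (a sketch; assumes FreeBSD exposes the v_swappgsin and v_swappgsout OIDs, which is the usual case):\n\n```bash\n# pages swapped in and swapped out since boot\nsysctl vm.stats.vm.v_swappgsin vm.stats.vm.v_swappgsout\n```\n\n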
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.vm.v_swappgs instance\n\nThis metric shows events happening on SWAP.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapio | io, out | KiB/s |\n\n",integration_type:"collector",id:"freebsd.plugin-vm.stats.vm.v_swappgs-vm.stats.vm.v_swappgs",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"vm.swap_info",monitored_instance:{name:"vm.swap_info",link:"",categories:["data-collection.operating-systems"],icon_filename:"freebsd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# vm.swap_info\n\nPlugin: freebsd.plugin\nModule: vm.swap_info\n\n## Overview\n\nCollect information about SWAP memory.\n\nThe plugin calls `sysctlnametomib` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| vm.swap_info | Enable or disable SWAP metrics. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
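To cross-check the values Netdata reports on this chart (a sketch; `swapinfo` is part of the FreeBSD base system):\n\n```bash\n# show swap devices and usage in megabytes\nswapinfo -m\n```\n\n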
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ used_swap ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swap | swap memory utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.swap_info instance\n\nThis metric shows the SWAP usage.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swap | free, used | MiB |\n\n",integration_type:"collector",id:"freebsd.plugin-vm.swap_info-vm.swap_info",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"vm.vmtotal",monitored_instance:{name:"vm.vmtotal",link:"https://www.freebsd.org/",categories:["data-collection.operating-systems"],icon_filename:"memory.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# vm.vmtotal\n\nPlugin: freebsd.plugin\nModule: vm.vmtotal\n\n## Overview\n\nCollect Virtual Memory information from host.\n\nThe plugin calls the `sysctl` function to collect data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enable total processes | Number of active processes. | yes | no |\n| processes running | Show number of processes running or blocked. | yes | no |\n| real memory | Memory used on host. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:vm.vmtotal]` section within that file.\n\nThe file format is a modified INI syntax. 
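To see the raw data this module summarizes (a sketch; assumes a FreeBSD host):\n\n```bash\n# human-readable dump of the vmtotal structure\nsysctl vm.vmtotal\n```\n\n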
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ active_processes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.vmtotal instance\n\nThese metrics show an overall view of the processes running on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.active_processes | active | processes |\n| system.processes | running, blocked | processes |\n| mem.real | used | MiB |\n\n",integration_type:"collector",id:"freebsd.plugin-vm.vmtotal-vm.vmtotal",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freebsd.plugin",module_name:"zfs",monitored_instance:{name:"zfs",link:"https://www.freebsd.org/",categories:["data-collection.storage"],icon_filename:"filesystem.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# zfs\n\nPlugin: freebsd.plugin\nModule: zfs\n\n## Overview\n\nCollect metrics for the ZFS filesystem.\n\nThe plugin uses the `sysctl` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| show zero charts | Enable or disable charts with zero metrics. | no | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:zfs_arcstats]` section within that file.\n\nThe file format is a modified INI syntax. 
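To verify that ARC statistics are exposed on your system (a sketch; assumes FreeBSD with the ZFS module loaded):\n\n```bash\n# sample a few of the kstat counters the plugin reads\nsysctl kstat.zfs.misc.arcstats | head\n```\n\n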
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per zfs instance\n\nThese metrics show detailed information about ZFS filesystem.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfs.arc_size | arcsz, target, min, max | MiB |\n| zfs.l2_size | actual, size | MiB |\n| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s |\n| zfs.bytes | read, write | KiB/s |\n| zfs.hits | hits, misses | percentage |\n| zfs.hits_rate | hits, misses | events/s |\n| zfs.dhits | hits, misses | percentage |\n| zfs.dhits_rate | hits, misses | events/s |\n| zfs.phits | hits, misses | percentage |\n| zfs.phits_rate | hits, misses | events/s |\n| zfs.mhits | hits, misses | percentage |\n| zfs.mhits_rate | hits, misses | events/s |\n| zfs.l2hits | hits, misses | percentage |\n| zfs.l2hits_rate | hits, misses | events/s |\n| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s |\n| zfs.arc_size_breakdown | recent, frequent | percentage |\n| zfs.memory_ops | throttled | operations/s |\n| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s |\n| zfs.actual_hits | hits, misses | percentage |\n| zfs.actual_hits_rate | hits, misses | events/s |\n| zfs.demand_data_hits | hits, misses | percentage |\n| zfs.demand_data_hits_rate | hits, misses | events/s |\n| zfs.prefetch_data_hits | hits, misses | percentage |\n| zfs.prefetch_data_hits_rate | hits, misses | events/s |\n| zfs.hash_elements | current, max | elements |\n| zfs.hash_chains | current, max | chains |\n| zfs.trim_bytes | TRIMmed | bytes |\n| zfs.trim_requests | successful, failed, unsupported | requests |\n\n",integration_type:"collector",id:"freebsd.plugin-zfs-zfs",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"freeipmi.plugin",module_name:"freeipmi",monitored_instance:{name:"Intelligent Platform Management Interface (IPMI)",link:"https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface",categories:["data-collection.hardware-and-sensors"],icon_filename:"netdata.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["sensors","ipmi","freeipmi","ipmimonitoring"]},overview:'# Intelligent Platform Management Interface (IPMI)\n\nPlugin: 
freeipmi.plugin\nModule: freeipmi\n\n## Overview\n\nMonitor enterprise server sensor readings, event log entries, and hardware statuses to ensure reliable server operations.\n\n\nThe plugin uses the open-source `libipmimonitoring` library to communicate with sensors.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid permissions.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn\'t support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe Linux kernel IPMI module can create significant overhead.\n',setup:"## Setup\n\n\n### Prerequisites\n\n#### Install freeipmi.plugin\n\nWhen using our official DEB/RPM packages, the FreeIPMI plugin is included in a separate package named `netdata-plugin-freeipmi` which needs to be manually installed using your system package manager. It is not installed automatically due to the large number of dependencies it requires.\n\nWhen using a static build of Netdata, the FreeIPMI plugin will be included and installed automatically, though you will still need to have FreeIPMI installed on your system to be able to use the plugin.\n\nWhen using a local build of Netdata, you need to ensure that the FreeIPMI development packages (typically called `libipmimonitoring-dev`, `libipmimonitoring-devel`, or `freeipmi-devel`) are installed when building Netdata.\n\n\n#### Preliminary actions\n\nIf you have not previously used IPMI on your system, you will probably need to run the `ipmimonitoring` command as root\nto initialize IPMI settings so that the Netdata plugin works correctly. It should return information about available sensors on the system.\n\n\n\n### Configuration\n\n#### Options\n\nThe configuration is set using command line options:\n\n```\n# netdata.conf\n[plugin:freeipmi]\n  command options = opt1 opt2 ... optN\n```\n\nTo display a help message listing the available command line options:\n\n```bash\n/usr/libexec/netdata/plugins.d/freeipmi.plugin --help\n```\n\n\n{% details open=true summary=\"Command options\" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SECONDS | Data collection frequency. |  | no |\n| debug | Enable verbose output. | disabled | no |\n| no-sel | Disable System Event Log (SEL) collection. | disabled | no |\n| reread-sdr-cache | Re-read SDR cache on every iteration. | disabled | no |\n| interpret-oem-data | Attempt to parse OEM data. | disabled | no |\n| assume-system-event-record | Treat illegal SEL event records as normal. | disabled | no |\n| ignore-non-interpretable-sensors | Do not read sensors that cannot be interpreted. | disabled | no |\n| bridge-sensors | Bridge sensors not owned by the BMC. | disabled | no |\n| shared-sensors | Enable shared sensors if found. | disabled | no |\n| no-discrete-reading | Do not read sensors if their event/reading type code is invalid. | enabled | no |\n| ignore-scanning-disabled | Ignore the scanning bit and read sensors no matter what. | disabled | no |\n| assume-bmc-owner | Assume the BMC is the sensor owner no matter what (usually bridging is required too). | disabled | no |\n| hostname HOST | Remote IPMI hostname or IP address. | local | no |\n| username USER | Username that will be used when connecting to the remote host. 
|  | no |\n| password PASS | Password that will be used when connecting to the remote host. |  | no |\n| noauthcodecheck / no-auth-code-check | Don't check the authentication codes returned. |  | no |\n| driver-type IPMIDRIVER | Specify the driver type to use instead of doing an auto selection. The currently available out-of-band drivers are LAN and LAN_2_0, which perform IPMI 1.5 and IPMI 2.0 respectively. The currently available in-band drivers are KCS, SSIF, OPENIPMI and SUNBMC. |  | no |\n| sdr-cache-dir PATH | SDR cache files directory. | /tmp | no |\n| sensor-config-file FILE | Sensors configuration filename. | system default | no |\n| sel-config-file FILE | SEL configuration filename. | system default | no |\n| ignore N1,N2,N3,... | Sensor IDs to ignore. |  | no |\n| ignore-status N1,N2,N3,... | Sensor IDs to ignore status (nominal/warning/critical). |  | no |\n| -v | Print version and exit. |  | no |\n| --help | Print usage message and exit. |  | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freeipmi]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\n\n###### Decrease data collection frequency\n\nBasic example decreasing data collection frequency. The minimum `update every` is 5 (enforced internally by the plugin). IPMI is slow and CPU-hungry, so collecting once every 5 seconds is acceptable.\n\n```ini\n[plugin:freeipmi]\n  update every = 10\n\n```\n###### Disable SEL collection\n\nAppend to `command options =` the options you need.\n\n{% details open=true summary=\"Config\" %}\n```ini\n[plugin:freeipmi]\n  command options = no-sel\n\n```\n{% /details %}\n###### Ignore specific sensors\n\nSpecific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`.\n\n**However, this file is not used by `libipmimonitoring`** (the library used by Netdata's `freeipmi.plugin`).\n\nTo find the IDs to ignore, run the command `ipmimonitoring`. 
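For example (assumes FreeIPMI is installed and the local BMC is reachable):\n\n```bash\n# list all sensors; run as root so the BMC can be queried\nsudo ipmimonitoring\n```\n\n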
The first column is the ID you want:\n\n```\nID  | Name             | Type                     | State    | Reading    | Units | Event\n1   | Ambient Temp     | Temperature              | Nominal  | 26.00      | C     | 'OK'\n2   | Altitude         | Other Units Based Sensor | Nominal  | 480.00     | ft    | 'OK'\n3   | Avg Power        | Current                  | Nominal  | 100.00     | W     | 'OK'\n4   | Planar 3.3V      | Voltage                  | Nominal  | 3.29       | V     | 'OK'\n5   | Planar 5V        | Voltage                  | Nominal  | 4.90       | V     | 'OK'\n6   | Planar 12V       | Voltage                  | Nominal  | 11.99      | V     | 'OK'\n7   | Planar VBAT      | Voltage                  | Nominal  | 2.95       | V     | 'OK'\n8   | Fan 1A Tach      | Fan                      | Nominal  | 3132.00    | RPM   | 'OK'\n9   | Fan 1B Tach      | Fan                      | Nominal  | 2150.00    | RPM   | 'OK'\n10  | Fan 2A Tach      | Fan                      | Nominal  | 2494.00    | RPM   | 'OK'\n11  | Fan 2B Tach      | Fan                      | Nominal  | 1825.00    | RPM   | 'OK'\n12  | Fan 3A Tach      | Fan                      | Nominal  | 3538.00    | RPM   | 'OK'\n13  | Fan 3B Tach      | Fan                      | Nominal  | 2625.00    | RPM   | 'OK'\n14  | Fan 1            | Entity Presence          | Nominal  | N/A        | N/A   | 'Entity Present'\n15  | Fan 2            | Entity Presence          | Nominal  | N/A        | N/A   | 'Entity Present'\n...\n```\n\n`freeipmi.plugin` supports the option `ignore` that accepts a comma-separated list of sensor IDs to ignore. To configure it, set the following in `netdata.conf`:\n\n\n{% details open=true summary=\"Config\" %}\n```ini\n[plugin:freeipmi]\n      command options = ignore 1,2,3,4,...\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nYou can run `freeipmi.plugin` with the debug option enabled to troubleshoot issues with it. The output should give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `freeipmi.plugin` in debug mode:\n\n  ```bash\n  ./freeipmi.plugin 5 debug\n  ```\n\n\n### kipmi0 CPU usage\n\nThere have been reports that kipmi shows increased CPU usage when IPMI is queried. To lower the CPU consumption of the system, you can issue this command:\n\n```sh\necho 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us\n```\n\nYou can also permanently set the above setting by creating the file `/etc/modprobe.d/ipmi.conf` with this content:\n\n```sh\n# prevent kipmi from consuming 100% CPU\noptions ipmi_si kipmid_max_busy_us=10\n```\n\nThis instructs the kernel IPMI module to pause for a tick between IPMI checks. Querying IPMI will be a lot slower now (e.g. 
several seconds for IPMI to respond), but `kipmi` will not use any noticeable CPU.\n\nYou can also use a higher number (this is the number of microseconds to poll IPMI for a response, before waiting for a tick).\n\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ ipmi_sensor_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipmi.conf) | ipmi.sensor_state | IPMI sensor ${label:sensor} (${label:component}) state |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe plugin does a speed test when it starts, to find out the duration needed by the IPMI processor to respond. Depending on the speed of your IPMI processor, charts may need several seconds to show up on the dashboard.\n\n\n### Per Intelligent Platform Management Interface (IPMI) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipmi.sel | events | events |\n\n### Per sensor\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| sensor | The sensor name |\n| type | One of 45 recognized sensor types (Battery, Voltage...) |\n| component | One of 25 recognized components (Processor, Peripheral). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipmi.sensor_state | nominal, critical, warning, unknown | state |\n| ipmi.sensor_temperature_c | temperature | Celsius |\n| ipmi.sensor_temperature_f | temperature | Fahrenheit |\n| ipmi.sensor_voltage | voltage | Volts |\n| ipmi.sensor_ampere | ampere | Amps |\n| ipmi.sensor_fan_speed | rotations | RPM |\n| ipmi.sensor_power | power | Watts |\n| ipmi.sensor_reading_percent | percentage | % |\n\n",integration_type:"collector",id:"freeipmi.plugin-freeipmi-Intelligent_Platform_Management_Interface_(IPMI)",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/freeipmi.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-activemq",module_name:"activemq",plugin_name:"go.d.plugin",monitored_instance:{categories:["data-collection.databases"],icon_filename:"activemq.png",name:"ActiveMQ",link:"https://activemq.apache.org/"},alternative_monitored_instances:[],keywords:["message broker"],info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"httpcheck"},{plugin_name:"apps.plugin",module_name:"apps"}]}}},overview:'# ActiveMQ\n\nPlugin: go.d.plugin\nModule: activemq\n\n## Overview\n\nThis collector monitors ActiveMQ queues and topics.\n\nIt collects metrics by sending HTTP requests to the Web Console API.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nActiveMQ can be monitored further using the following other integrations:\n\n- {% relatedResource id="go.d.plugin-httpcheck-HTTP_Endpoints" %}HTTP Endpoints{% /relatedResource %}\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host that provide metrics on port 8161.\nOn startup, it tries to collect metrics from:\n\n- 
http://localhost:8161\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **activemq** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **activemq**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/activemq.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8161 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n|  | webadmin | Webadmin root path. | admin | yes |\n| **Limits** | max_queues | Maximum number of queues to collect concurrently. | 50 | no |\n|  | max_topics | Maximum number of topics to collect concurrently. | 50 | no |\n| **Filters** | queues_filter | Queue selector. Uses [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). |  | no |\n|  | topics_filter | Topic selector. Uses [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. 
| GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **activemq** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the activemq data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _activemq_ (or scroll the list) to locate the **activemq** collector.\n5. Click the **+** next to the **activemq** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/activemq.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/activemq.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8161\n    webadmin: admin\n\n```\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8161\n    webadmin: admin\n    username: foo\n    password: bar\n\n```\n{% /details %}\n###### Filters and limits\n\nUsing filters and limits for queues and topics.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8161\n    webadmin: admin\n    max_queues: 100\n    max_topics: 100\n    queues_filter: \'!sandr* *\'\n    topics_filter: \'!sandr* *\'\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8161\n    webadmin: admin\n\n  - name: remote\n    url: http://192.0.2.1:8161\n    webadmin: admin\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `activemq` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m activemq\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m activemq -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `activemq` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep activemq\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep activemq /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep activemq\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ActiveMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| activemq.messages | enqueued, dequeued | messages/s |\n| activemq.unprocessed_messages | unprocessed | messages |\n| activemq.consumers | consumers | consumers |\n\n",integration_type:"collector",id:"go.d.plugin-activemq-ActiveMQ",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/activemq/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-adaptecraid",plugin_name:"go.d.plugin",module_name:"adaptec_raid",monitored_instance:{name:"Adaptec RAID",link:"https://www.microchip.com/en-us/products/storage",icon_filename:"adaptec.svg",categories:["data-collection.storage"]},keywords:["storage","raid-controller","manage-disks"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Adaptec RAID\n\nPlugin: go.d.plugin\nModule: adaptec_raid\n\n## Overview\n\nMonitors the health of Adaptec Hardware RAID by tracking the status of logical and physical devices in your storage system.\nIt relies on the `arcconf` CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n-  `arcconf GETCONFIG 1 LD`\n-  `arcconf GETCONFIG 1 PD`\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **adaptec_raid** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **adaptec_raid**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/adaptec_raid.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | arcconf binary execution timeout. | 2 | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **adaptec_raid** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the adaptec_raid data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _adaptec_raid_ (or scroll the list) to locate the **adaptec_raid** collector.\n5. Click the **+** next to the **adaptec_raid** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/adaptec_raid.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/adaptec_raid.conf\n```\n\n##### Examples\n\n###### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: adaptec_raid\n    update_every: 5  # Collect Adaptec Hardware RAID statistics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `adaptec_raid` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m adaptec_raid\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m adaptec_raid -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `adaptec_raid` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep adaptec_raid\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep adaptec_raid /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep adaptec_raid\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ adaptec_raid_ld_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf) | adaptecraid.logical_device_status | Adaptec RAID logical device (number ${label:ld_number} name ${label:ld_name}) health status is critical |\n| [ adaptec_raid_pd_health_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf) | adaptecraid.physical_device_state | Adaptec RAID physical device (number ${label:pd_number} location ${label:location}) health state is critical |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per logical device\n\nThese metrics refer to the Logical Device (LD).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| ld_number | Logical device index number |\n| ld_name | Logical device name |\n| raid_level | RAID level |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adaptecraid.logical_device_status | ok, critical | status |\n\n### Per physical device\n\nThese metrics refer to the Physical Device (PD).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| pd_number | Physical device index number |\n| location | Physical device location (e.g. 
Connector 0, Device 1) |\n| vendor | Physical device vendor |\n| model | Physical device model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adaptecraid.physical_device_state | ok, critical | status |\n| adaptecraid.physical_device_smart_warnings | smart | warnings |\n| adaptecraid.physical_device_temperature | temperature | Celsius |\n\n",integration_type:"collector",id:"go.d.plugin-adaptec_raid-Adaptec_RAID",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/adaptecraid/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"go.d.plugin",module_name:"ap",monitored_instance:{name:"Access Points",link:"",categories:["data-collection.networking"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ap","access","point","wireless","network"]},overview:"# Access Points\n\nPlugin: go.d.plugin\nModule: ap\n\n## Overview\n\nThis collector monitors various wireless access point metrics like connected clients, bandwidth, packets, transmit issues, signal strength, and bitrate for each device and its associated SSID.\n\n\nThis tool uses the `iw` command-line utility to discover nearby access points. It starts by running `iw dev`, which provides information about all wireless interfaces.  Then, for each interface identified as an access point (type AP), the `iw INTERFACE station dump` command is executed to gather relevant metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin is able to auto-detect any access points on your Linux machine.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **ap** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **ap**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/ap.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### `iw` utility.\n\nMake sure the `iw` utility is installed.\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `iw` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/sbin/iw | yes |\n| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **ap** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the ap data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _ap_ (or scroll the list) to locate the **ap** collector.\n5. Click the **+** next to the **ap** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/ap.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ap.conf\n```\n\n##### Examples\n\n###### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n```yaml\njobs:\n  - name: custom_iw\n    binary_path: /usr/local/sbin/iw\n\n```\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `ap` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m ap\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m ap -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `ap` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep ap\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep ap /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep ap\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per wireless device\n\nThese metrics refer to the wireless device.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| device | Wireless interface name |\n| ssid | SSID |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ap.clients | clients | clients |\n| ap.net | received, sent | kilobits/s |\n| ap.packets | received, sent | packets/s |\n| ap.issues | retries, failures | issues/s |\n| ap.signal | average signal | dBm |\n| ap.bitrate | receive, transmit | Mbps |\n\n",integration_type:"collector",id:"go.d.plugin-ap-Access_Points",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/ap/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-apache",plugin_name:"go.d.plugin",module_name:"apache",monitored_instance:{name:"Apache",link:"https://httpd.apache.org/",icon_filename:"apache.svg",categories:["data-collection.web-servers-and-proxies"]},keywords:["webserver"],related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"web_log"},{plugin_name:"go.d.plugin",module_name:"httpcheck"},{plugin_name:"apps.plugin",module_name:"apps"}]}},info_provided_to_referring_integrations:{description:""}},overview:'# Apache\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), \nwhich is a built-in location that provides metrics about the Apache server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nApache can be monitored further using the following other integrations:\n\n- {% relatedResource id="go.d.plugin-web_log-Web_server_log_files" %}Web server log files{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-httpcheck-HTTP_Endpoints" %}HTTP Endpoints{% /relatedResource %}\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apache instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **apache** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                     
    | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **apache**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/apache.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1/server-status?auto | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **apache** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the apache data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _apache_ (or scroll the list) to locate the **apache** collector.\n5. Click the **+** next to the **apache** collector to add a new job.\n6. 
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/server-status?auto\n\n```\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/server-status?auto\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1/server-status?auto\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/server-status?auto\n\n  - name: remote\n    url: http://192.0.2.1/server-status?auto\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m apache\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m apache -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `apache` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep apache\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep apache /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep apache\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nAll metrics available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.\n\n\n### Per Apache instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Basic | Extended |\n|:------|:----------|:----|:---:|:---:|\n| apache.connections | connections | connections | \u2022 | \u2022 |\n| apache.conns_async | keepalive, closing, writing | connections | \u2022 | \u2022 |\n| apache.workers | idle, busy | workers | \u2022 | \u2022 |\n| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | \u2022 | \u2022 |\n| apache.requests | requests | requests/s |   | \u2022 |\n| apache.net | sent | kilobit/s |   | \u2022 |\n| apache.reqpersec | requests | requests/s |   | \u2022 |\n| apache.bytespersec | served | KiB/s |   | \u2022 |\n| apache.bytesperreq | size | KiB |   | \u2022 |\n| apache.uptime | uptime | seconds |   | \u2022 |\n\n",integration_type:"collector",id:"go.d.plugin-apache-Apache",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/apache/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-httpd",plugin_name:"go.d.plugin",module_name:"apache",monitored_instance:{name:"HTTPD",link:"https://httpd.apache.org/",icon_filename:"apache.svg",categories:["data-collection.web-servers-and-proxies"]},keywords:["webserver"],related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"web_log"},{plugin_name:"go.d.plugin",module_name:"httpcheck"},{plugin_name:"apps.plugin",module_name:"apps"}]}},info_provided_to_referring_integrations:{description:""}},overview:'# HTTPD\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), \nwhich is a built-in location that provides metrics about the Apache server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nHTTPD can be monitored further using the following 
other integrations:\n\n- {% relatedResource id="go.d.plugin-web_log-Web_server_log_files" %}Web server log files{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-httpcheck-HTTP_Endpoints" %}HTTP Endpoints{% /relatedResource %}\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apache instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **apache** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **apache**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/apache.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1/server-status?auto | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **apache** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the apache data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _apache_ (or scroll the list) to locate the **apache** collector.\n5. Click the **+** next to the **apache** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/server-status?auto\n\n```\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/server-status?auto\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1/server-status?auto\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/server-status?auto\n\n  - name: remote\n    url: http://192.0.2.1/server-status?auto\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the 
UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m apache\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m apache -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `apache` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep apache\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep apache /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep apache\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
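If you want to see the raw material behind the tables that follow, you can fetch the same machine-readable status page the collector parses. This is only a sanity check, not part of the collector's operation; the endpoint shown is the collector's default and may differ from your job's `url`.

```bash
# Dump the mod_status machine-readable output this collector scrapes.
# With "ExtendedStatus On", extra fields such as "Total Accesses",
# "ReqPerSec" and "BytesPerSec" appear in this output; without it,
# only the basic worker and scoreboard fields are reported.
curl -s "http://127.0.0.1/server-status?auto"
```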
An instance is uniquely identified by a set of labels.\n\nAll metrics available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.\n\n\n### Per Apache instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Basic | Extended |\n|:------|:----------|:----|:---:|:---:|\n| apache.connections | connections | connections | \u2022 | \u2022 |\n| apache.conns_async | keepalive, closing, writing | connections | \u2022 | \u2022 |\n| apache.workers | idle, busy | workers | \u2022 | \u2022 |\n| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | \u2022 | \u2022 |\n| apache.requests | requests | requests/s |   | \u2022 |\n| apache.net | sent | kilobit/s |   | \u2022 |\n| apache.reqpersec | requests | requests/s |   | \u2022 |\n| apache.bytespersec | served | KiB/s |   | \u2022 |\n| apache.bytesperreq | size | KiB |   | \u2022 |\n| apache.uptime | uptime | seconds |   | \u2022 |\n\n",integration_type:"collector",id:"go.d.plugin-apache-HTTPD",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/apache/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-apcupsd",plugin_name:"go.d.plugin",module_name:"apcupsd",monitored_instance:{name:"APC UPS",link:"https://www.apc.com",icon_filename:"apc.svg",categories:["data-collection.hardware-and-sensors"]},keywords:["ups","apcupsd","apc"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# APC UPS\n\nPlugin: go.d.plugin\nModule: apcupsd\n\n## Overview\n\nThis collector monitors Uninterruptible Power Supplies by polling the Apcupsd daemon.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apcupsd instances running on localhost that are listening on port 3551.\nOn startup, it tries to collect metrics from:\n\n- 127.0.0.1:3551\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **apcupsd** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **apcupsd**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/apcupsd.conf` and add a job.                                                                  
      |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | address | apcupsd daemon address (`IP:PORT`). | 127.0.0.1:3551 | yes |\n|  | timeout | Connection, read, write, and name resolution timeout (seconds). | 2 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **apcupsd** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the apcupsd data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _apcupsd_ (or scroll the list) to locate the **apcupsd** collector.\n5. Click the **+** next to the **apcupsd** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/apcupsd.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apcupsd.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:3551\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:3551\n\n  - name: remote\n    address: 203.0.113.0:3551\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `apcupsd` collector, run the `go.d.plugin` with the debug option enabled. 
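Before that, it can be worth confirming the daemon actually answers on the address the job uses. A minimal sketch using `apcaccess`, the client shipped with apcupsd (the address below is the collector default; adjust it to match your job):

```bash
# Query the apcupsd daemon directly on its Network Information Server port.
# A healthy daemon prints key/value pairs such as STATUS, BCHARGE and TIMELEFT.
apcaccess status 127.0.0.1:3551
```

If the daemon responds here but the collector still fails, continue with the debug run.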
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m apcupsd\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m apcupsd -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `apcupsd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep apcupsd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep apcupsd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep apcupsd\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ apcupsd_ups_load_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_load_capacity_utilization | APC UPS average load over the last 10 minutes |\n| [ apcupsd_ups_battery_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_battery_charge | APC UPS average battery charge over the last minute |\n| [ apcupsd_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS number of seconds since the last successful data collection |\n| [ apcupsd_ups_selftest_warning ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_selftest | APC UPS self-test failed due to insufficient battery capacity or due to overload |\n| [ apcupsd_ups_status_onbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS has switched to battery power because the input power has failed |\n| [ apcupsd_ups_status_overload ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS is overloaded and cannot supply enough power to the load |\n| [ apcupsd_ups_status_lowbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS battery is low and needs to be recharged |\n| [ apcupsd_ups_status_replacebatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | 
apcupsd.ups_status | APC UPS battery has reached the end of its lifespan and needs to be replaced |\n| [ apcupsd_ups_status_nobatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS has no battery |\n| [ apcupsd_ups_status_commlost ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.ups_status | APC UPS communication link is lost |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ups\n\nThese metrics refer to the UPS unit.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| apcupsd.ups_status | TRIM, BOOST, CAL, ONLINE, ONBATT, OVERLOAD, LOWBATT, REPLACEBATT, NOBATT, SLAVE, SLAVEDOWN, COMMLOST, SHUTTING_DOWN | status |\n| apcupsd.ups_selftest | NO, NG, WN, IP, OK, BT, UNK | status |\n| apcupsd.ups_battery_charge | charge | percent |\n| apcupsd.ups_battery_time_remaining | timeleft | seconds |\n| apcupsd.ups_battery_time_since_replacement | since_replacement | seconds |\n| apcupsd.ups_battery_voltage | voltage, nominal_voltage | Volts |\n| apcupsd.ups_load_capacity_utilization | load | percent |\n| apcupsd.ups_load | load | Watts |\n| apcupsd.ups_temperature | temperature | Celsius |\n| apcupsd.ups_input_voltage | voltage, min_voltage, max_voltage | Volts |\n| apcupsd.ups_input_frequency | frequency | Hz |\n| apcupsd.ups_output_voltage | voltage | Volts |\n\n",integration_type:"collector",id:"go.d.plugin-apcupsd-APC_UPS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/apcupsd/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-beanstalk",plugin_name:"go.d.plugin",module_name:"beanstalk",monitored_instance:{name:"Beanstalk",link:"https://beanstalkd.github.io/",categories:["data-collection.databases"],icon_filename:"beanstalk.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["beanstalk","beanstalkd","message"]},overview:"# Beanstalk\n\nPlugin: go.d.plugin\nModule: beanstalk\n\n## Overview\n\nThis collector monitors Beanstalk server performance and provides detailed statistics for each tube.\n\n\nUsing the [beanstalkd protocol](https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt), it communicates with the Beanstalk daemon to gather essential metrics that help understand the server's performance and activity.\nExecuted commands:\n\n- [stats](https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L553).\n- [list-tubes](https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L688).\n- [stats-tube](https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L497).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Beanstalk instances running on localhost that are listening on port 11300.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance 
impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **beanstalk** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **beanstalk**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/beanstalk.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | address | Beanstalk service address (`IP:PORT`). | 127.0.0.1:11300 | yes |\n|  | timeout | Connection, read, write, and name resolution timeout (seconds). | 1 | no |\n| **Filters** | tube_selector | Tube selector. Defines which Beanstalk tubes to monitor. Uses [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#readme). | * | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **beanstalk** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the beanstalk data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _beanstalk_ (or scroll the list) to locate the **beanstalk** collector.\n5. Click the **+** next to the **beanstalk** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/beanstalk.conf`.\n\nThe file format is YAML. 
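Beyond the minimal structure shown next, the `tube_selector` option from the table above deserves an example, since it takes Netdata simple patterns rather than a plain string. A sketch, with invented tube names for illustration:

```yaml
jobs:
  - name: local
    address: 127.0.0.1:11300
    # Collect per-tube metrics only for tubes whose names start with
    # "email_" or "sms_"; all other tubes are ignored.
    tube_selector: 'email_* sms_*'
```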
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/beanstalk.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:11300\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:11300\n\n  - name: remote\n    address: 203.0.113.0:11300\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `beanstalk` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m beanstalk\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m beanstalk -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `beanstalk` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep beanstalk\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep beanstalk /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep beanstalk\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ beanstalk_server_buried_jobs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/beanstalkd.conf) | beanstalk.current_jobs | number of buried jobs across all tubes. You need to manually kick them so they can be processed. Presence of buried jobs in a tube does not affect new jobs. |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Beanstalk instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| beanstalk.current_jobs | ready, buried, urgent, delayed, reserved | jobs |\n| beanstalk.jobs_rate | created | jobs/s |\n| beanstalk.jobs_timeouts | timeouts | jobs/s |\n| beanstalk.current_tubes | tubes | tubes |\n| beanstalk.commands_rate | put, peek, peek-ready, peek-delayed, peek-buried, reserve, reserve-with-timeout, touch, use, watch, ignore, delete, bury, kick, stats, stats-job, stats-tube, list-tubes, list-tube-used, list-tubes-watched, pause-tube | commands/s |\n| beanstalk.current_connections | open, producers, workers, waiting | connections |\n| beanstalk.connections_rate | created | connections/s |\n| beanstalk.binlog_records | written, migrated | records/s |\n| beanstalk.cpu_usage | user, system | percent |\n| beanstalk.uptime | uptime | seconds |\n\n### Per tube\n\nMetrics related to Beanstalk tubes. This set of metrics is provided for each tube.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| tube_name | Tube name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| beanstalk.tube_current_jobs | ready, buried, urgent, delayed, reserved | jobs |\n| beanstalk.tube_jobs_rate | created | jobs/s |\n| beanstalk.tube_commands_rate | delete, pause-tube | commands/s |\n| beanstalk.tube_current_connections | using, waiting, watching | connections |\n| beanstalk.tube_pause_time | since, left | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-beanstalk-Beanstalk",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/beanstalk/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-boinc",plugin_name:"go.d.plugin",module_name:"boinc",monitored_instance:{name:"BOINC",link:"https://boinc.berkeley.edu/",categories:["data-collection.applications"],icon_filename:"bolt.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["boinc","distributed"]},overview:"# BOINC\n\nPlugin: go.d.plugin\nModule: boinc\n\n## Overview\n\nThis collector monitors task counts for the Berkeley Open Infrastructure for Network Computing (BOINC) distributed computing client.\n\n\nIt communicates with BOINC using the [GUI RPC protocol](https://boinc.berkeley.edu/trac/wiki/GuiRpcProtocol).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects BOINC client instances running on localhost that are listening on port 31416.\nOn startup, it tries to collect metrics from:\n\n- 127.0.0.1:31416\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **boinc** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **boinc**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/boinc.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| address | The IP address and port where the BOINC client listens for connections. | 127.0.0.1:31416 | yes |\n| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| password | The GUI RPC password for authentication. |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **boinc** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the boinc data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _boinc_ (or scroll the list) to locate the **boinc** collector.\n5. Click the **+** next to the **boinc** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/boinc.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/boinc.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:31416\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:31416\n\n  - name: remote\n    address: 203.0.113.0:31416\n    password: somePassword\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `boinc` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m boinc\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m boinc -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `boinc` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep boinc\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep boinc /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep boinc\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ boinc_total_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of total tasks over the last 10 minutes |\n| [ boinc_active_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of active tasks over the last 10 minutes |\n| [ boinc_compute_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks_state | average number of compute errors over the last 10 minutes |\n| [ boinc_upload_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks_state | average number of failed uploads over the last 10 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
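(If the task counts reported here look surprising, you can cross-check them against the BOINC client's own CLI. A sketch using `boinccmd`, which ships with the BOINC client; the address and password below are placeholders matching the earlier configuration example.)

```bash
# List the client's tasks over the same GUI RPC port the collector uses.
boinccmd --host 127.0.0.1:31416 --passwd somePassword --get_tasks
```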
An instance is uniquely identified by a set of labels.\n\n\n\n### Per BOINC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| boinc.tasks | total, active | tasks |\n| boinc.tasks_per_state | new, downloading, downloaded, compute_error, uploading, uploaded, aborted, upload_failed | tasks |\n| boinc.active_tasks_per_state | uninitialized, executing, abort_pending, quit_pending, suspended, copy_pending | tasks |\n| boinc.active_tasks_per_scheduler_state | uninitialized, preempted, scheduled | tasks |\n\n",integration_type:"collector",id:"go.d.plugin-boinc-BOINC",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/boinc/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-cassandra",module_name:"cassandra",plugin_name:"go.d.plugin",monitored_instance:{categories:["data-collection.databases"],icon_filename:"cassandra.svg",name:"Cassandra",link:"https://cassandra.apache.org/_/index.html"},alternative_monitored_instances:[],keywords:["nosql","dbms","db","database"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Cassandra\n\nPlugin: go.d.plugin\nModule: cassandra\n\n## Overview\n\nThis collector gathers metrics about client requests, cache hits, and many more, while also providing metrics per each thread pool.\n\n\nThe [JMX Exporter](https://github.com/prometheus/jmx_exporter) is used to fetch metrics from a Cassandra instance and make them available at an endpoint like `http://127.0.0.1:7072/metrics`.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host that provide metrics on port 7072.\n\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:7072/metrics\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **cassandra** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **cassandra**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/cassandra.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Configure Cassandra with Prometheus JMX Exporter\n\nTo configure Cassandra with the [JMX Exporter](https://github.com/prometheus/jmx_exporter):\n\n> **Note**: paths can differ depending on your setup.\n\n- Download the latest [jmx_exporter](https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/) jar file\n  and install it in a directory where Cassandra can access it.\n- Add\n  the [jmx_exporter.yaml](https://raw.githubusercontent.com/netdata/go.d.plugin/master/modules/cassandra/jmx_exporter.yaml)\n  file to `/etc/cassandra`.\n- Add the following line to `/etc/cassandra/cassandra-env.sh`:\n  ```\n  JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS -javaagent:/opt/jmx_exporter/jmx_exporter.jar=7072:/etc/cassandra/jmx_exporter.yaml"\n  ```\n- Restart the Cassandra service.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 5 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:7072/metrics | yes |\n|  | timeout | HTTP request timeout (seconds). | 2 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **cassandra** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the cassandra data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _cassandra_ (or scroll the list) to locate the **cassandra** collector.\n5. Click the **+** next to the **cassandra** collector to add a new job.\n6. 
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/cassandra.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cassandra.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:7072/metrics\n\n```\n###### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:7072/metrics\n    username: foo\n    password: bar\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nLocal server with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:7072/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:7072/metrics\n\n  - name: remote\n    url: http://192.0.2.1:7072/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `cassandra` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m cassandra\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m cassandra -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `cassandra` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep cassandra\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep cassandra /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep cassandra\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Cassandra instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cassandra.client_requests_rate | read, write | requests/s |\n| cassandra.client_request_read_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds |\n| cassandra.client_request_write_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds |\n| cassandra.client_requests_latency | read, write | seconds |\n| cassandra.row_cache_hit_ratio | hit_ratio | percentage |\n| cassandra.row_cache_hit_rate | hits, misses | events/s |\n| cassandra.row_cache_utilization | used | percentage |\n| cassandra.row_cache_size | size | bytes |\n| cassandra.key_cache_hit_ratio | hit_ratio | percentage |\n| cassandra.key_cache_hit_rate | hits, misses | events/s |\n| cassandra.key_cache_utilization | used | percentage |\n| cassandra.key_cache_size | size | bytes |\n| cassandra.storage_live_disk_space_used | used | bytes |\n| cassandra.compaction_completed_tasks_rate | completed | tasks/s |\n| cassandra.compaction_pending_tasks_count | pending | tasks |\n| cassandra.compaction_compacted_rate | compacted | bytes/s |\n| cassandra.jvm_memory_used | heap, nonheap | bytes |\n| cassandra.jvm_gc_rate | parnew, cms | gc/s |\n| cassandra.jvm_gc_time | parnew, cms | seconds |\n| cassandra.dropped_messages_rate | dropped | messages/s |\n| cassandra.client_requests_timeouts_rate | read, write | timeout/s |\n| cassandra.client_requests_unavailables_rate | read, write | exceptions/s |\n| cassandra.client_requests_failures_rate | read, write | failures/s |\n| cassandra.storage_exceptions_rate | storage | exceptions/s |\n\n### Per thread pool\n\nMetrics related to Cassandra's thread pools. 
Each thread pool provides its own set of the following metrics.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| thread_pool | thread pool name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cassandra.thread_pool_active_tasks_count | active | tasks |\n| cassandra.thread_pool_pending_tasks_count | pending | tasks |\n| cassandra.thread_pool_blocked_tasks_count | blocked | tasks |\n| cassandra.thread_pool_blocked_tasks_rate | blocked | tasks/s |\n\n",integration_type:"collector",id:"go.d.plugin-cassandra-Cassandra",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/cassandra/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"go.d.plugin",module_name:"ceph",monitored_instance:{name:"Ceph",link:"https://ceph.io/",categories:["data-collection.storage"],icon_filename:"ceph.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ceph","storage"]},overview:"# Ceph\n\nPlugin: go.d.plugin\nModule: ceph\n\n## Overview\n\nThis collector monitors the overall health status and performance of your Ceph clusters.\nIt gathers key metrics for the entire cluster, individual Pools, and OSDs.\n\n\nIt collects metrics by periodically issuing HTTP GET requests to the Ceph Manager [REST API](https://docs.ceph.com/en/reef/mgr/ceph_api/#):\n\n- [/api/monitor](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-monitor) (only once to get the Ceph cluster id (fsid))          \n- [/api/health/minimal](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-health-minimal)\n- [/api/osd](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-osd)\n- [/api/pool?stats=true](https://docs.ceph.com/en/reef/mgr/ceph_api/#get--api-pool)\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector can automatically detect Ceph Manager instances running on:\n\n- localhost that are listening on port 8443\n- within Docker containers\n\n> **Note that the Ceph REST API requires a username and password**. \n> While Netdata can automatically detect Ceph Manager instances and create data collection jobs, these jobs will fail unless you provide the necessary credentials.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **ceph** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **ceph**, then click **+** to add a job. 
|\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/ceph.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | The URL of the [Ceph Manager API](https://docs.ceph.com/en/reef/mgr/ceph_api/). | https://127.0.0.1:8443 | yes |\n|  | timeout | HTTP request timeout (seconds). | 2 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | yes |\n|  | password | Password for Basic HTTP authentication. |  | yes |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | yes | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **ceph** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the ceph data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _ceph_ (or scroll the list) to locate the **ceph** collector.\n5. Click the **+** next to the **ceph** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/ceph.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ceph.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:8443\n    username: user\n    password: pass\n\n```\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:8443\n    username: user\n    password: pass\n\n  - name: remote\n    url: https://192.0.2.1:8443\n    username: user\n    password: pass\n\n```\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `ceph` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m ceph\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m ceph -j jobName\n  ```\n\n
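If the debug output shows connection or certificate errors, you can also check that the Ceph Manager endpoint is reachable at all. This is only a hedged reachability check (the API itself uses token-based authentication, which this does not exercise); it assumes the default `https://127.0.0.1:8443`:\n\n```bash\n# -k skips certificate verification, matching the collector's tls_skip_verify default for Ceph\ncurl -sk -I https://127.0.0.1:8443\n```\n\n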
### Getting Logs\n\nIf you're encountering problems with the `ceph` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep ceph\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep ceph /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep ceph\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ ceph_cluster_physical_capacity_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ceph.conf) | ceph.cluster_physical_capacity_utilization | Ceph cluster ${label:fsid} disk space utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cluster\n\nThese metrics refer to the entire Ceph cluster.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| fsid | A unique identifier of the cluster. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ceph.cluster_status | ok, err, warn | status |\n| ceph.cluster_hosts_count | hosts | hosts |\n| ceph.cluster_monitors_count | monitors | monitors |\n| ceph.cluster_osds_count | osds | osds |\n| ceph.cluster_osds_by_status_count | up, down, in, out | status |\n| ceph.cluster_managers_count | active, standby | managers |\n| ceph.cluster_object_gateways_count | object | gateways |\n| ceph.cluster_iscsi_gateways_count | iscsi | gateways |\n| ceph.cluster_iscsi_gateways_by_status_count | up, down | gateways |\n| ceph.cluster_physical_capacity_utilization | utilization | percent |\n| ceph.cluster_physical_capacity_usage | avail, used | bytes |\n| ceph.cluster_objects_count | objects | objects |\n| ceph.cluster_objects_by_status_distribution | healthy, misplaced, degraded, unfound | percent |\n| ceph.cluster_pools_count | pools | pools |\n| ceph.cluster_pgs_count | pgs | pgs |\n| ceph.cluster_pgs_by_status_count | clean, working, warning, unknown | pgs |\n| ceph.cluster_pgs_per_osd_count | per_osd | pgs |\n\n### Per osd\n\nThese metrics refer to the Object Storage Daemon (OSD).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| fsid | A unique identifier of the cluster. |\n| osd_uuid | OSD UUID. |\n| osd_name | OSD name. |\n| device_class | OSD device class. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ceph.osd_status | up, down, in, out | status |\n| ceph.osd_space_usage | avail, used | bytes |\n| ceph.osd_io | read, written | bytes/s |\n| ceph.osd_iops | read, write | ops/s |\n| ceph.osd_latency | commit, apply | milliseconds |\n\n### Per pool\n\nThese metrics refer to the Pool.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| fsid | A unique identifier of the cluster. |\n| pool_name | Pool name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ceph.pool_space_utilization | utilization | percent |\n| ceph.pool_space_usage | avail, used | bytes |\n| ceph.pool_objects_count | object | objects |\n| ceph.pool_io | read, written | bytes/s |\n| ceph.pool_iops | read, write | ops/s |\n\n",integration_type:"collector",id:"go.d.plugin-ceph-Ceph",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/ceph/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-chrony",module_name:"chrony",plugin_name:"go.d.plugin",monitored_instance:{categories:["data-collection.networking"],icon_filename:"chrony.jpg",name:"Chrony",link:"https://chrony.tuxfamily.org/"},alternative_monitored_instances:[],keywords:[],info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[]}}},overview:"# Chrony\n\nPlugin: go.d.plugin\nModule: chrony\n\n## Overview\n\nThis collector monitors the system's clock performance and peer activity status.\n\n\nIt collects metrics by sending UDP packets to chronyd using the Chrony communication protocol v6.\nAdditionally, for data collection jobs that connect to localhost Chrony instances, it collects serverstats metrics (NTP packets, command packets received/dropped) by executing the 'chronyc serverstats' command.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers a Chrony instance running on the local host and listening on port 323.\nOn startup, it tries to collect metrics from:\n\n- 127.0.0.1:323\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **chrony** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **chrony**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/chrony.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 5 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | address | Chrony server address (`IP:PORT`). | 127.0.0.1:323 | yes |\n|  | timeout | Connection timeout (seconds). Set 0 to disable. | 1 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **chrony** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the chrony data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _chrony_ (or scroll the list) to locate the **chrony** collector.\n5. Click the **+** next to the **chrony** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/chrony.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/chrony.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:323\n\n```\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:323\n\n  - name: remote\n    address: 192.0.2.1:323\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `chrony` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m chrony\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m chrony -j jobName\n  ```\n\n
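Because local jobs also gather serverstats metrics by running `chronyc serverstats` (see Overview), you can verify on the node that chronyd responds to these commands. A quick sanity check outside the official steps, assuming `chronyc` is installed:\n\n```bash\n# Confirms chronyd is running and answering on the command port\nchronyc tracking\n# The same command local data collection jobs execute for NTP/command packet metrics\nchronyc serverstats\n```\n\n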
### Getting Logs\n\nIf you're encountering problems with the `chrony` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep chrony\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep chrony /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep chrony\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Chrony instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| chrony.stratum | stratum | level |\n| chrony.current_correction | current_correction | seconds |\n| chrony.root_delay | root_delay | seconds |\n| chrony.root_dispersion | root_dispersion | seconds |\n| chrony.last_offset | offset | seconds |\n| chrony.rms_offset | offset | seconds |\n| chrony.frequency | frequency | ppm |\n| chrony.residual_frequency | residual_frequency | ppm |\n| chrony.skew | skew | ppm |\n| chrony.update_interval | update_interval | seconds |\n| chrony.ref_measurement_time | ref_measurement_time | seconds |\n| chrony.leap_status | normal, insert_second, delete_second, unsynchronised | status |\n| chrony.activity | online, offline, burst_online, burst_offline, unresolved | sources |\n| chrony.ntp_packets | received, dropped | packets/s |\n| chrony.command_packets | received, dropped | packets/s |\n\n",integration_type:"collector",id:"go.d.plugin-chrony-Chrony",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/chrony/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-clickhouse",plugin_name:"go.d.plugin",module_name:"clickhouse",monitored_instance:{name:"ClickHouse",link:"https://clickhouse.com/",icon_filename:"clickhouse.svg",categories:["data-collection.databases"]},keywords:["database"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# ClickHouse\n\nPlugin: go.d.plugin\nModule: clickhouse\n\n## Overview\n\nThis collector retrieves performance data from ClickHouse for connections, queries, resources, replication, IO, and data operations (inserts, selects, merges) using HTTP requests and ClickHouse system tables. It monitors your ClickHouse server's health and activity.\n\n\nIt sends HTTP requests to the ClickHouse [HTTP interface](https://clickhouse.com/docs/en/interfaces/http), executing SELECT queries to retrieve data from various system tables.\nSpecifically, it collects metrics from the following tables (a quick way to inspect them yourself follows this list):\n\n- system.metrics\n- system.asynchronous_metrics\n- system.events\n- system.disks\n- system.parts\n- system.processes\n\n
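You can reproduce what the collector does with plain HTTP requests. A minimal sketch, assuming the default endpoint `http://127.0.0.1:8123` and no authentication:\n\n```bash\n# The HTTP interface answers URL-encoded queries over GET; expect the single character 1\ncurl -s 'http://127.0.0.1:8123/?query=SELECT%201'\n# Peek at one of the system tables listed above\ncurl -s 'http://127.0.0.1:8123/?query=SELECT%20*%20FROM%20system.metrics%20LIMIT%203'\n```\n\n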
\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects ClickHouse instances running on localhost that are listening on port 8123.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:8123\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **clickhouse** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **clickhouse**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/clickhouse.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8123 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Functions** | functions.top_queries.disabled | Disable the [top-queries](#top-queries) function. | no | no |\n|  | functions.top_queries.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.top_queries.limit | Maximum number of queries to return. | 500 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **clickhouse** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the clickhouse data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _clickhouse_ (or scroll the list) to locate the **clickhouse** collector.\n5. Click the **+** next to the **clickhouse** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/clickhouse.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/clickhouse.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8123\n\n```\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8123\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nClickHouse with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:8123\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8123\n\n  - name: remote\n    url: http://192.0.2.1:8123\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `clickhouse` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m clickhouse\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m clickhouse -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `clickhouse` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep clickhouse\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep clickhouse /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep clickhouse\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ clickhouse_restarted ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.uptime | ClickHouse has recently been restarted |\n| [ clickhouse_queries_preempted ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.queries_preempted | ClickHouse has queries that are stopped and waiting due to priority setting |\n| [ clickhouse_long_running_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.longest_running_query_time | ClickHouse has a long-running query exceeding the threshold |\n| [ clickhouse_rejected_inserts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.rejected_inserts | ClickHouse has INSERT queries that are rejected due to high number of active data parts for partition in a MergeTree |\n| [ clickhouse_delayed_inserts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.delayed_inserts | ClickHouse has INSERT queries that are throttled due to high number of active data parts for partition in a MergeTree |\n| [ clickhouse_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.replicas_max_absolute_delay | ClickHouse is experiencing replication lag greater than 5 minutes |\n| [ clickhouse_replicated_readonly_tables ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.replicated_readonly_tables | ClickHouse has replicated tables in readonly state due to ZooKeeper session loss/startup without ZooKeeper configured |\n| [ clickhouse_max_part_count_for_partition ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.max_part_count_for_partition | ClickHouse high number of parts per partition |\n| [ clickhouse_distributed_connections_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.distributed_connections_fail_exhausted_retries | ClickHouse has failed distributed connections after exhausting all retry attempts |\n| [ clickhouse_distributed_files_to_insert ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.distributed_files_to_insert | ClickHouse high number of pending files to process for asynchronous insertion into Distributed tables |\n",functions:"## Functions\n\nThis collector exposes real-time functions for interactive troubleshooting in the Live tab.\n\n\n### Top Queries\n\nRetrieves and aggregates SQL query performance metrics from ClickHouse [system.query_log](https://clickhouse.com/docs/en/operations/system-tables/query_log) table.\n\nThis function queries the `system.query_log` table, which contains information about executed queries including timing, resource usage, and execution statistics. 
Queries are grouped by their normalized hash (`normalized_query_hash`) to aggregate statistics for identical query patterns with different literal values.\n\nUse cases:\n- Identify slow queries that consume the most execution time\n- Find frequently executed queries that may benefit from optimization\n- Analyze I/O patterns by examining read/written rows and bytes\n\nQuery text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Clickhouse:top-queries` |\n| Require Cloud | yes |\n| Performance | Queries `system.query_log` table and aggregates by `normalized_query_hash`:<br/>\u2022 On busy systems with high query throughput, the table can grow large<br/>\u2022 Default limit of 500 rows balances usefulness with performance |\n| Security | Query text may contain unmasked literal values including potentially sensitive data:<br/>\u2022 Personal information in query parameters<br/>\u2022 Business data and internal identifiers<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to ClickHouse<br/>\u2022 `system.query_log` table is accessible<br/>\u2022 Returns HTTP 503 if `system.query_log` is not accessible<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Grant access to `system.query_log`\n\nEnsure the Netdata user can read `system.query_log` on the target ClickHouse instance.\n\n1. Verify `query_log` is enabled (enabled by default):\n\n   ```sql\n   SELECT * FROM system.query_log LIMIT 1;\n   ```\n\n2. If using a dedicated monitoring user, grant SELECT access:\n\n   ```sql\n   GRANT SELECT ON system.query_log TO netdata_user;\n   ```\n\n:::info\n\n- The `query_log` table is enabled by default in ClickHouse\n- Only queries with `type='QueryFinish'` are included in the results\n- The `normalized_query_hash` column is used for grouping when available\n\n:::\n\n\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. The available options include total execution time, number of calls, rows read, and more. Defaults to total execution time to focus on most resource-intensive queries. | yes | totalTime |  |\n\n#### Returns\n\nAggregated query statistics from `system.query_log`, grouped by normalized query hash. Each row represents a unique query pattern with cumulative metrics across all executions.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Query ID | string |  | hidden | Unique hash identifier for the normalized query pattern. Queries with identical structure but different literal values share the same hash. |\n| Query | string |  |  | SQL query text from one of the executions. Truncated to 4096 characters. Use this to identify the actual SQL being executed. |\n| Database | string |  |  | Database name where the query was executed. Empty string for queries without a database context or system queries. |\n| User | string |  |  | ClickHouse user that executed the query. Useful for identifying query sources and implementing per-user resource monitoring. |\n| Calls | integer |  |  | Total number of times this query pattern has been executed. High values indicate frequently run queries that impact overall server load. 
|\n| Total Time | duration | milliseconds |  | Cumulative execution time across all executions. High values indicate queries that consume significant server resources over time. |\n| Avg Time | duration | milliseconds |  | Average execution time per query run. Use this to compare typical performance across different query patterns. |\n| Min Time | duration | milliseconds | hidden | Minimum execution time observed for a single execution. Helps identify best-case query performance. |\n| Max Time | duration | milliseconds | hidden | Maximum execution time observed for a single execution. Large gaps between min and max may indicate data skew or resource contention. |\n| Read Rows | integer |  |  | Total number of rows read from storage across all executions. High values suggest queries scanning large amounts of data that may benefit from better filtering or indexing. |\n| Read Bytes | integer |  |  | Total bytes read from storage across all executions. Indicates I/O load and data transfer volume for the query pattern. |\n| Written Rows | integer |  | hidden | Total number of rows written across all executions. Relevant for INSERT, CREATE, or materialized view queries. |\n| Written Bytes | integer |  | hidden | Total bytes written across all executions. Indicates storage impact of write operations. |\n| Result Rows | integer |  |  | Total number of rows returned to clients across all executions. A high ratio of read rows to result rows indicates filtering or aggregation happening on large datasets. |\n| Result Bytes | integer |  | hidden | Total bytes returned to clients across all executions. Large values may indicate queries returning more data than necessary. |\n| Max Memory | float |  | hidden | Maximum memory used during any single execution. High values may indicate queries at risk of hitting memory limits under load. |\n\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ClickHouse instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| clickhouse.connections | tcp, http, mysql, postgresql, interserver | connections |\n| clickhouse.slow_reads | slow | reads/s |\n| clickhouse.read_backoff | read_backoff | events/s |\n| clickhouse.memory_usage | used | bytes |\n| clickhouse.running_queries | running | queries |\n| clickhouse.queries_preempted | preempted | queries |\n| clickhouse.queries | successful, failed | queries/s |\n| clickhouse.select_queries | successful, failed | selects/s |\n| clickhouse.insert_queries | successful, failed | inserts/s |\n| clickhouse.queries_memory_limit_exceeded | mem_limit_exceeded | queries/s |\n| clickhouse.longest_running_query_time | longest_query_time | seconds |\n| clickhouse.queries_latency | queries_time | microseconds |\n| clickhouse.select_queries_latency | selects_time | microseconds |\n| clickhouse.insert_queries_latency | inserts_time | microseconds |\n| clickhouse.io | reads, writes | bytes/s |\n| clickhouse.iops | reads, writes | ops/s |\n| clickhouse.io_errors | read, write | errors/s |\n| clickhouse.io_seeks | lseek | ops/s |\n| clickhouse.io_file_opens | file_open | ops/s |\n| clickhouse.replicated_parts_current_activity | fetch, send, check | parts |\n| clickhouse.replicas_max_absolute_delay | replication_delay | seconds |\n| clickhouse.replicated_readonly_tables | read_only | tables |\n| clickhouse.replicated_data_loss | data_loss | events |\n| clickhouse.replicated_part_fetches | successful, failed | fetches/s |\n| clickhouse.inserted_rows | inserted | rows/s |\n| clickhouse.inserted_bytes | inserted | bytes/s |\n| clickhouse.rejected_inserts | rejected | inserts/s |\n| clickhouse.delayed_inserts | delayed | inserts/s |\n| clickhouse.delayed_inserts_throttle_time | delayed_inserts_throttle_time | milliseconds |\n| clickhouse.selected_bytes | selected | bytes/s |\n| clickhouse.selected_rows | selected | rows/s |\n| clickhouse.selected_parts | selected | parts/s |\n| clickhouse.selected_ranges | selected | ranges/s |\n| clickhouse.selected_marks | selected | marks/s |\n| clickhouse.merges | merge | ops/s |\n| clickhouse.merges_latency | merges_time | milliseconds |\n| clickhouse.merged_uncompressed_bytes | merged_uncompressed | bytes/s |\n| clickhouse.merged_rows | merged | rows/s |\n| clickhouse.merge_tree_data_writer_inserted_rows | inserted | rows/s |\n| clickhouse.merge_tree_data_writer_uncompressed_bytes | inserted | bytes/s |\n| clickhouse.merge_tree_data_writer_compressed_bytes | written | bytes/s |\n| clickhouse.uncompressed_cache_requests | hits, misses | requests/s |\n| clickhouse.mark_cache_requests | hits, misses | requests/s |\n| clickhouse.max_part_count_for_partition | max_parts_partition | parts |\n| clickhouse.parts_count | temporary, pre_active, active, deleting, delete_on_destroy, outdated, wide, compact | parts |\n| clickhouse.distributed_connections | active | connections |\n| clickhouse.distributed_connections_attempts | connection | attempts/s |\n| clickhouse.distributed_connections_fail_retries | connection_retry | fails/s |\n| clickhouse.distributed_connections_fail_exhausted_retries | connection_retry_exhausted | fails/s |\n| clickhouse.distributed_files_to_insert | pending_insertions | files |\n| clickhouse.distributed_rejected_inserts | rejected | inserts/s |\n| clickhouse.distributed_delayed_inserts | delayed | inserts/s |\n| clickhouse.distributed_delayed_inserts_latency | delayed_time | milliseconds |\n| clickhouse.distributed_sync_insertion_timeout_exceeded | sync_insertion | timeouts/s |\n| clickhouse.distributed_async_insertions_failures | async_insertions | failures/s |\n| clickhouse.uptime | uptime | seconds |\n\n### Per disk\n\nThese metrics refer to the Disk.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| disk_name | Name of the disk as defined in the [server configuration](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-multiple-volumes_configure). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| clickhouse.disk_space_usage | free, used | bytes |\n\n### Per table\n\nThese metrics refer to the Database Table.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| database | Name of the database. |\n| table | Name of the table. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| clickhouse.database_table_size | size | bytes |\n| clickhouse.database_table_parts | parts | parts |\n| clickhouse.database_table_rows | rows | rows |\n\n",integration_type:"collector",id:"go.d.plugin-clickhouse-ClickHouse",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/clickhouse/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-cockroachdb",plugin_name:"go.d.plugin",module_name:"cockroachdb",monitored_instance:{name:"CockroachDB",link:"https://www.cockroachlabs.com/",icon_filename:"cockroachdb.svg",categories:["data-collection.databases"]},keywords:["cockroachdb","databases"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# CockroachDB\n\nPlugin: go.d.plugin\nModule: cockroachdb\n\n## Overview\n\nThis collector monitors CockroachDB servers.\n\n\nIt scrapes Prometheus metrics from the CockroachDB `/_status/vars` endpoint.\n\n
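That endpoint serves the standard Prometheus text format, so you can inspect it directly. A quick sanity check (not part of the official setup), assuming the default HTTP port 8080:\n\n```bash\n# Expect plain-text Prometheus metric lines from the CockroachDB status endpoint\ncurl -s http://127.0.0.1:8080/_status/vars | head\n```\n\n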
It also provides `top-queries` and `running-queries` functions using SQL statement statistics (`crdb_internal.cluster_statement_statistics`) and the `SHOW CLUSTER STATEMENTS` command.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe `top-queries` and `running-queries` functions require the following (a verification sketch follows this list):\n\n- A SQL user with `VIEWACTIVITY` or `VIEWACTIVITYREDACTED` privileges.\n- Access to `crdb_internal.cluster_statement_statistics` (may require `SET allow_unsafe_internals = on` on newer versions).\n\n
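You can verify these prerequisites from a SQL shell before relying on the functions. A hedged sketch using `cockroach sql`; the connection flags and the `netdata` user name are assumptions to adapt to your deployment:\n\n```bash\n# Can the monitoring user read cluster-wide statement statistics?\ncockroach sql --insecure --host=127.0.0.1:26257 --user=netdata -e 'SELECT count(*) FROM crdb_internal.cluster_statement_statistics'\n```\n\n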
\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **cockroachdb** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **cockroachdb**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/cockroachdb.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8080/_status/vars | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Functions** | functions.dsn | SQL DSN (required for query functions). |  | no |\n|  | functions.top_queries.disabled | Disable the [top-queries](#top-queries) function. | no | no |\n|  | functions.top_queries.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.top_queries.limit | Maximum number of queries to return. | 500 | no |\n|  | functions.running_queries.disabled | Disable the [running-queries](#running-queries) function. | no | no |\n|  | functions.running_queries.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.running_queries.limit | Maximum number of queries to return. | 500 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **cockroachdb** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the cockroachdb data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. 
The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _cockroachdb_ (or scroll the list) to locate the **cockroachdb** collector.\n5. Click the **+** next to the **cockroachdb** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/cockroachdb.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cockroachdb.conf\n```\n\n##### Examples\n\n###### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8080/_status/vars\n\n```\n{% /details %}\n###### Top queries\n\nEnable SQL query functions.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8080/_status/vars\n    functions:\n      dsn: postgres://root@127.0.0.1:26257/defaultdb?sslmode=disable\n\n```\n{% /details %}\n###### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8080/_status/vars\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nCockroachDB with HTTPS enabled and a self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:8080/_status/vars\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8080/_status/vars\n\n  - name: remote\n    url: http://203.0.113.10:8080/_status/vars\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `cockroachdb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m cockroachdb\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m cockroachdb -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `cockroachdb` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep cockroachdb\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep cockroachdb /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep cockroachdb\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ cockroachdb_used_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage capacity utilization |\n| [ cockroachdb_used_usable_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage usable space utilization |\n| [ cockroachdb_unavailable_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than needed for quorum |\n| [ cockroachdb_underreplicated_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than the replication target |\n| [ cockroachdb_open_file_descriptors_limit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.process_file_descriptors | open file descriptors utilization (against softlimit) |\n",functions:"## Functions\n\nThis collector exposes real-time functions for interactive troubleshooting in the Live tab.\n\n\n### Top Queries\n\nRetrieves and aggregates SQL statement performance metrics from the CockroachDB [crdb_internal.cluster_statement_statistics](https://www.cockroachlabs.com/docs/stable/crdb-internal#cluster_statement_statistics) table.\n\nThis function queries cluster-wide statement statistics grouped by fingerprint (normalized query pattern). 
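For orientation, you can eyeball the same source table from a SQL shell. This is only a sketch, not the collector's exact query; it assumes a local node on port 26257 and a SQL user that already holds the privileges listed under Prerequisites:\n\n```bash\n# Illustrative spot-check of the table this function reads\n# (on newer versions you may first need: SET allow_unsafe_internals = on)\ncockroach sql --insecure --host=127.0.0.1:26257 --execute=\"SELECT fingerprint_id, app_name FROM crdb_internal.cluster_statement_statistics LIMIT 5\"\n```\n\nThe function itself aggregates and ranks this raw data for you. 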
It provides aggregated metrics including execution counts, timing breakdowns, and row operation statistics.\n\nUse cases:\n- Identify slow queries consuming the most total execution time\n- Find frequently executed queries that may benefit from optimization\n- Analyze row read/write patterns to detect inefficient queries\n\nQuery text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Cockroachdb:top-queries` |\n| Require Cloud | yes |\n| Performance | Queries the `crdb_internal.cluster_statement_statistics` table which aggregates data across the cluster:<br/>\u2022 On busy clusters with high query throughput, this query may take longer<br/>\u2022 Default limit of 500 rows balances usefulness with performance |\n| Security | Query text may contain unmasked literal values including potentially sensitive data:<br/>\u2022 Personal information in WHERE clauses or INSERT values<br/>\u2022 Business data and internal identifiers<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to CockroachDB<br/>\u2022 The SQL user has `VIEWACTIVITY` or `VIEWACTIVITYREDACTED` privileges<br/>\u2022 Returns HTTP 503 if the SQL connection cannot be established<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Grant `VIEWACTIVITY` access to cluster statement stats\n\nThe SQL user must have appropriate privileges to access statement statistics.\n\n1. Grant `VIEWACTIVITY` (shows full query text) or `VIEWACTIVITYREDACTED` (masks literals):\n\n   ```sql\n   GRANT SYSTEM VIEWACTIVITY TO netdata_user;\n   -- OR for privacy:\n   GRANT SYSTEM VIEWACTIVITYREDACTED TO netdata_user;\n   ```\n\n2. On newer CockroachDB versions, access to `crdb_internal` may require:\n\n   ```sql\n   SET allow_unsafe_internals = on;\n   ```\n\n:::info\n\n- The collector automatically sets `allow_unsafe_internals = on` for the session when querying `crdb_internal` tables (required on newer versions)\n- `VIEWACTIVITYREDACTED` replaces literal values with underscores for privacy\n- Statement statistics are collected by default but can be disabled via cluster settings\n\n:::\n\n\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. Options include total time, executions, rows read, rows written, and more. Defaults to total time to focus on most resource-intensive queries. | yes | totalTime |  |\n\n#### Returns\n\nAggregated SQL statement statistics grouped by fingerprint. Each row represents a unique query pattern with cumulative metrics across all executions.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Fingerprint ID | string |  | hidden | Unique hash identifier for the normalized query pattern. Queries with identical structure but different literal values share the same fingerprint. |\n| Query | string |  |  | Normalized SQL statement text with literals replaced. Truncated to 4096 characters. |\n| Database | string |  |  | Database name where the query was executed. Empty for queries without database context. |\n| Application | string |  |  | Application name that executed the query. Useful for identifying query sources across services. 
|\n| Statement Type | string |  | hidden | Type of SQL statement (SELECT, INSERT, UPDATE, DELETE, etc.). |\n| Distributed | string |  | hidden | Whether the query used DistSQL execution (true/false). Distributed queries span multiple nodes. |\n| Full Scan | string |  | hidden | Whether the query performed a full table scan (true/false). Full scans may indicate missing indexes. |\n| Implicit Txn | string |  | hidden | Whether the statement ran in an implicit transaction (true/false). |\n| Vectorized | string |  | hidden | Whether the query used vectorized execution (true/false). Vectorized execution improves performance for analytical queries. |\n| Executions | integer |  |  | Total number of times this query pattern has been executed. High values indicate frequently run queries. |\n| Total Time | duration | milliseconds |  | Cumulative service latency across all executions (mean time \xd7 executions). High values indicate queries consuming significant cluster resources. |\n| Mean Time | duration | milliseconds |  | Average service latency per execution. Use this to compare typical performance across query patterns. |\n| Run Time | duration | milliseconds | hidden | Average time spent executing the query after planning. Excludes parse and plan time. |\n| Plan Time | duration | milliseconds | hidden | Average time spent generating the query execution plan. High values may indicate complex queries or stale statistics. |\n| Parse Time | duration | milliseconds | hidden | Average time spent parsing the SQL statement. |\n| Rows Read | integer |  |  | Total rows read across all executions. High values relative to rows returned suggest missing indexes or inefficient scans. |\n| Rows Written | integer |  |  | Total rows written across all executions. Indicates write workload for INSERT, UPDATE, DELETE statements. |\n| Rows Returned | integer |  |  | Total rows returned to clients across all executions. Compare with rows read to assess query efficiency. |\n| Bytes Read | integer |  | hidden | Total bytes read from storage across all executions. Indicates I/O load for the query pattern. |\n| Max Retries | integer |  | hidden | Maximum number of automatic retries observed for this query pattern. High values indicate transaction contention. 
|\n\n### Running Queries\n\nRetrieves currently executing SQL statements across the CockroachDB cluster using [SHOW CLUSTER STATEMENTS](https://www.cockroachlabs.com/docs/stable/show-statements).\n\nThis function provides a real-time snapshot of all active queries across all nodes in the cluster, including their execution phase, duration, and associated metadata.\n\nUse cases:\n- Identify long-running queries that may be blocking other operations\n- Monitor active workload distribution across the cluster\n- Debug stuck or slow queries in real-time\n\nQuery text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Cockroachdb:running-queries` |\n| Require Cloud | yes |\n| Performance | Executes the `SHOW CLUSTER STATEMENTS` command which queries all nodes in the cluster:<br/>\u2022 Lightweight operation with minimal overhead<br/>\u2022 Returns only currently active queries, typically a small result set |\n| Security | Query text may contain unmasked literal values including potentially sensitive data:<br/>\u2022 Personal information in WHERE clauses or VALUES<br/>\u2022 Session tokens or credentials<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to CockroachDB<br/>\u2022 The SQL user has `VIEWACTIVITY` or `VIEWACTIVITYREDACTED` privileges<br/>\u2022 Returns HTTP 503 if the SQL connection cannot be established<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Grant `VIEWACTIVITY` access to system tables\n\nThe SQL user must have appropriate privileges to view running statements.\n\n1. Grant `VIEWACTIVITY` (shows full query text) or `VIEWACTIVITYREDACTED` (masks literals):\n\n   ```sql\n   GRANT SYSTEM VIEWACTIVITY TO netdata_user;\n   -- OR for privacy:\n   GRANT SYSTEM VIEWACTIVITYREDACTED TO netdata_user;\n   ```\n\n   :::info\n\n   - `SHOW CLUSTER STATEMENTS` shows queries across all nodes, not just the connected node\n   - `VIEWACTIVITYREDACTED` replaces literal values with underscores for privacy\n   - Queries shown are point-in-time snapshots and may complete between retrieval and display\n\n   :::\n\n\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. Defaults to elapsed time to show longest-running queries first. | yes | elapsedMs |  |\n\n#### Returns\n\nReal-time snapshot of currently executing SQL statements across all cluster nodes. Each row represents a single active query.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Query ID | string |  | hidden | Unique identifier for this specific query execution. Can be used with CANCEL QUERY if needed. |\n| Query | string |  |  | The SQL statement currently being executed. Truncated to 4096 characters. |\n| User | string |  |  | Database user executing the query. Useful for identifying workload by user. |\n| Application | string |  |  | Application name from the client connection. Helps identify which service is running the query. |\n| Client Address | string |  | hidden | IP address of the client connection. Useful for identifying query sources. |\n| Node ID | string |  | hidden | CockroachDB node currently executing the query. Helps identify workload distribution. 
|\n| Session ID | string |  | hidden | Session identifier for the connection. Multiple queries may share a session. |\n| Phase | string |  |  | Current execution phase (executing, preparing, etc.). Indicates query progress. |\n| Distributed | string |  | hidden | Whether the query is using distributed execution across multiple nodes. |\n| Start Time | string |  | hidden | Timestamp when the query started executing. |\n| Elapsed | duration | milliseconds |  | Time elapsed since query started. High values indicate long-running queries that may need investigation. |\n\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CockroachDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cockroachdb.process_cpu_time_combined_percentage | used | percentage |\n| cockroachdb.process_cpu_time_percentage | user, sys | percentage |\n| cockroachdb.process_cpu_time | user, sys | ms |\n| cockroachdb.process_memory | rss | KiB |\n| cockroachdb.process_file_descriptors | open | fd |\n| cockroachdb.process_uptime | uptime | seconds |\n| cockroachdb.host_disk_bandwidth | read, write | KiB |\n| cockroachdb.host_disk_operations | reads, writes | operations |\n| cockroachdb.host_disk_iops_in_progress | in_progress | iops |\n| cockroachdb.host_network_bandwidth | received, sent | kilobits |\n| cockroachdb.host_network_packets | received, sent | packets |\n| cockroachdb.live_nodes | live_nodes | nodes |\n| cockroachdb.node_liveness_heartbeats | successful, failed | heartbeats |\n| cockroachdb.total_storage_capacity | total | KiB |\n| cockroachdb.storage_capacity_usability | usable, unusable | KiB |\n| cockroachdb.storage_usable_capacity | available, used | KiB |\n| cockroachdb.storage_used_capacity_percentage | total, usable | percentage |\n| cockroachdb.sql_connections | active | connections |\n| cockroachdb.sql_bandwidth | received, sent | KiB |\n| cockroachdb.sql_statements_total | started, executed | statements |\n| cockroachdb.sql_errors | statement, transaction | errors |\n| cockroachdb.sql_started_ddl_statements | ddl | statements |\n| cockroachdb.sql_executed_ddl_statements | ddl | statements |\n| cockroachdb.sql_started_dml_statements | select, update, delete, insert | statements |\n| cockroachdb.sql_executed_dml_statements | select, update, delete, insert | statements |\n| cockroachdb.sql_started_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements |\n| cockroachdb.sql_executed_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements |\n| cockroachdb.sql_active_distributed_queries | active | queries |\n| cockroachdb.sql_distributed_flows | active, queued | flows |\n| cockroachdb.live_bytes | applications, system | KiB |\n| cockroachdb.logical_data | keys, values | KiB |\n| cockroachdb.logical_data_count | keys, values | num |\n| cockroachdb.kv_transactions | committed, fast-path_committed, aborted | transactions |\n| cockroachdb.kv_transaction_restarts | write_too_old, write_too_old_multiple, forwarded_timestamp, possible_reply, async_consensus_failure, read_within_uncertainty_interval, aborted, push_failure, unknown | 
restarts |\n| cockroachdb.ranges | ranges | ranges |\n| cockroachdb.ranges_replication_problem | unavailable, under_replicated, over_replicated | ranges |\n| cockroachdb.range_events | split, add, remove, merge | events |\n| cockroachdb.range_snapshot_events | generated, applied_raft_initiated, applied_learner, applied_preemptive | events |\n| cockroachdb.rocksdb_read_amplification | reads | reads/query |\n| cockroachdb.rocksdb_table_operations | compactions, flushes | operations |\n| cockroachdb.rocksdb_cache_usage | used | KiB |\n| cockroachdb.rocksdb_cache_operations | hits, misses | operations |\n| cockroachdb.rocksdb_cache_hit_rate | hit_rate | percentage |\n| cockroachdb.rocksdb_sstables | sstables | sstables |\n| cockroachdb.replicas | replicas | replicas |\n| cockroachdb.replicas_quiescence | quiescent, active | replicas |\n| cockroachdb.replicas_leaders | leaders, not_leaseholders | replicas |\n| cockroachdb.replicas_leaseholders | leaseholders | leaseholders |\n| cockroachdb.queue_processing_failures | gc, replica_gc, replication, split, consistency, raft_log, raft_snapshot, time_series_maintenance | failures |\n| cockroachdb.rebalancing_queries | avg | queries/s |\n| cockroachdb.rebalancing_writes | avg | writes/s |\n| cockroachdb.timeseries_samples | written | samples |\n| cockroachdb.timeseries_write_errors | write | errors |\n| cockroachdb.timeseries_write_bytes | written | KiB |\n| cockroachdb.slow_requests | acquiring_latches, acquiring_lease, in_raft | requests |\n| cockroachdb.code_heap_memory_usage | go, cgo | KiB |\n| cockroachdb.goroutines | goroutines | goroutines |\n| cockroachdb.gc_count | gc | invokes |\n| cockroachdb.gc_pause | pause | us |\n| cockroachdb.cgo_calls | cgo | calls |\n\n",integration_type:"collector",id:"go.d.plugin-cockroachdb-CockroachDB",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/cockroachdb/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-consul",plugin_name:"go.d.plugin",module_name:"consul",monitored_instance:{name:"Consul",link:"https://www.consul.io/",categories:["data-collection.applications"],icon_filename:"consul.svg"},alternative_monitored_instances:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["service networking platform","hashicorp","autopilot"]},overview:"# Consul\n\nPlugin: go.d.plugin\nModule: consul\n\n## Overview\n\nThis collector monitors [key metrics](https://developer.hashicorp.com/consul/docs/agent/telemetry#key-metrics) of Consul Agents: transaction timings, leadership changes, memory usage and more.\n\n\nIt periodically sends HTTP requests to [Consul REST API](https://developer.hashicorp.com/consul/api-docs).\n\nUsed endpoints:\n\n- [/operator/autopilot/health](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health)\n- [/agent/checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks)\n- [/agent/self](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration)\n- [/agent/metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics)\n- [/coordinate/nodes](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes)\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local 
host that provide metrics on port 8500.\n\nOn startup, it tries to collect metrics from:\n\n- http://localhost:8500\n- http://127.0.0.1:8500\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **consul** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **consul**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/consul.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable Prometheus telemetry\n\n[Enable](https://developer.hashicorp.com/consul/docs/agent/config/config-files#telemetry-prometheus_retention_time) telemetry on your Consul Agent by increasing the value of `prometheus_retention_time` from `0`.\n\n\n#### Add required ACLs to Token\n\nRequired **only if authentication is enabled**.\n\n|       ACL       | Endpoint                                                                                                                                                                                                                                                                                       |\n|:---------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| `operator:read` | [autopilot health status](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health)                                                                                                                                                                                      |\n|   `node:read`   | [checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks)                                                                                                                                                                                                              |\n|  `agent:read`   | [configuration](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration), [metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics), and [lan coordinates](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes) |\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined 
globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="All options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Consul HTTP API URL. | http://localhost:8500 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | acl_token | Consul ACL token sent with every request (`X-Consul-Token` header). |  | no |\n|  | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **consul** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the consul data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _consul_ (or scroll the list) to locate the **consul** collector.\n5. Click the **+** next to the **consul** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/consul.conf`.\n\nThe file format is YAML. 
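If you automate deployments (e.g., with Ansible), you can also write this file directly instead of going through `edit-config`; a minimal sketch, assuming the stock config path and a placeholder token:\n\n```bash\n# Sketch only: drop a minimal consul job file in place (token is a placeholder)\nsudo tee /etc/netdata/go.d/consul.conf > /dev/null <<EOF\njobs:\n  - name: local\n    url: http://127.0.0.1:8500\n    acl_token: REPLACE_WITH_TOKEN\nEOF\nsudo systemctl restart netdata\n```\n\n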
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/consul.conf\n```\n\n##### Examples\n\n###### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8500\n    acl_token: "ec15675e-2999-d789-832e-8c4794daa8d7"\n\n```\n{% /details %}\n###### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8500\n    acl_token: "ec15675e-2999-d789-832e-8c4794daa8d7"\n    username: foo\n    password: bar\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8500\n    acl_token: "ec15675e-2999-d789-832e-8c4794daa8d7"\n\n  - name: remote\n    url: http://203.0.113.10:8500\n    acl_token: "ada7f751-f654-8872-7f93-498e799158b6"\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `consul` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m consul\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m consul -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `consul` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep consul\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep consul /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
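To cut this down to the most recent lines, you can pipe the output through `tail` (the line count here is arbitrary):\n\n```bash\n# Keep only the last matching lines\ngrep consul /var/log/netdata/collector.log | tail -n 50\n```\n\n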
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep consul\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ consul_node_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.node_health_check_status | node health check ${label:check_name} has failed on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_service_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.service_health_check_status | service health check ${label:check_name} for service ${label:service_name} has failed on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_client_rpc_requests_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_exceeded_rate | number of rate-limited RPC requests made by server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_client_rpc_requests_failed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_failed_rate | number of failed RPC requests made by server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_gc_pause_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.gc_pause_time | time spent in stop-the-world garbage collection pauses on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_autopilot_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_health_status | datacenter ${label:datacenter} cluster is unhealthy as reported by server ${label:node_name} |\n| [ consul_autopilot_server_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_server_health_status | server ${label:node_name} from datacenter ${label:datacenter} is unhealthy |\n| [ consul_raft_leader_last_contact_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leader_last_contact_time | median time elapsed since leader server ${label:node_name} datacenter ${label:datacenter} was last able to contact the follower nodes |\n| [ consul_raft_leadership_transitions ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leadership_transitions_rate | there has been a leadership change and server ${label:node_name} datacenter ${label:datacenter} has become the leader |\n| [ consul_raft_thread_main_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_main_saturation_perc | average saturation of the main Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_raft_thread_fsm_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_fsm_saturation_perc | average saturation of the FSM Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_license_expiration_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.license_expiration_time | Consul 
Enterprise licence expiration time on node ${label:node_name} datacenter ${label:datacenter} |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe set of metrics depends on the [Consul Agent mode](https://developer.hashicorp.com/consul/docs/install/glossary#agent).\n\n\n### Per Consul instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.client_rpc_requests_rate | rpc | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.client_rpc_requests_exceeded_rate | exceeded | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.client_rpc_requests_failed_rate | failed | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.memory_allocated | allocated | bytes | \u2022 | \u2022 | \u2022 |\n| consul.memory_sys | sys | bytes | \u2022 | \u2022 | \u2022 |\n| consul.gc_pause_time | gc_pause | seconds | \u2022 | \u2022 | \u2022 |\n| consul.kvs_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 |   |\n| consul.kvs_apply_operations_rate | kvs_apply | ops/s | \u2022 | \u2022 |   |\n| consul.txn_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 |   |\n| consul.txn_apply_operations_rate | txn_apply | ops/s | \u2022 | \u2022 |   |\n| consul.autopilot_health_status | healthy, unhealthy | status | \u2022 | \u2022 |   |\n| consul.autopilot_failure_tolerance | failure_tolerance | servers | \u2022 | \u2022 |   |\n| consul.autopilot_server_health_status | healthy, unhealthy | status | \u2022 | \u2022 |   |\n| consul.autopilot_server_stable_time | stable | seconds | \u2022 | \u2022 |   |\n| consul.autopilot_server_serf_status | active, failed, left, none | status | \u2022 | \u2022 |   |\n| consul.autopilot_server_voter_status | voter, not_voter | status | \u2022 | \u2022 |   |\n| consul.network_lan_rtt | min, max, avg | ms | \u2022 | \u2022 |   |\n| consul.raft_commit_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 |   |   |\n| consul.raft_commits_rate | commits | commits/s | \u2022 |   |   |\n| consul.raft_leader_last_contact_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 |   |   |\n| consul.raft_leader_oldest_log_age | oldest_log_age | seconds | \u2022 |   |   |\n| consul.raft_follower_last_contact_leader_time | leader_last_contact | ms |   | \u2022 |   |\n| consul.raft_rpc_install_snapshot_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms |   | \u2022 |   |\n| consul.raft_leader_elections_rate | leader | elections/s | \u2022 | \u2022 |   |\n| consul.raft_leadership_transitions_rate | leadership | transitions/s | \u2022 | \u2022 |   |\n| consul.server_leadership_status | leader, not_leader | status | \u2022 | \u2022 |   |\n| consul.raft_thread_main_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | \u2022 | \u2022 |   |\n| consul.raft_thread_fsm_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | \u2022 | \u2022 |   |\n| consul.raft_fsm_last_restore_duration | last_restore_duration | ms | \u2022 | \u2022 |   |\n| consul.raft_boltdb_freelist_bytes | freelist | bytes | \u2022 | \u2022 |   |\n| consul.raft_boltdb_logs_per_batch_rate | written | logs/s | \u2022 | \u2022 |   |\n| consul.raft_boltdb_store_logs_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 |   |\n| 
consul.license_expiration_time | license_expiration | seconds | \u2022 | \u2022 | \u2022 |\n\n### Per node check\n\nMetrics about checks on Node level.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| datacenter | Datacenter Identifier |\n| node_name | The node's name |\n| check_name | The check's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.node_health_check_status | passing, maintenance, warning, critical | status | \u2022 | \u2022 | \u2022 |\n\n### Per service check\n\nMetrics about checks at a Service level.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| datacenter | Datacenter Identifier |\n| node_name | The node's name |\n| check_name | The check's name |\n| service_name | The service's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.service_health_check_status | passing, maintenance, warning, critical | status | \u2022 | \u2022 | \u2022 |\n\n",integration_type:"collector",id:"go.d.plugin-consul-Consul",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/consul/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-coredns",plugin_name:"go.d.plugin",module_name:"coredns",monitored_instance:{name:"CoreDNS",link:"https://coredns.io/",icon_filename:"coredns.svg",categories:["data-collection.networking"]},keywords:["coredns","dns","kubernetes"],related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"k8s_state"},{plugin_name:"go.d.plugin",module_name:"k8s_apiserver"},{plugin_name:"go.d.plugin",module_name:"k8s_kubelet"},{plugin_name:"go.d.plugin",module_name:"k8s_kubeproxy"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Kubernetes Containers"}]}},info_provided_to_referring_integrations:{description:""}},overview:'# CoreDNS\n\nPlugin: go.d.plugin\nModule: coredns\n\n## Overview\n\nThis collector monitors CoreDNS instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nCoreDNS can be monitored further using the following other integrations:\n\n- {% relatedResource id="go.d.plugin-k8s_state-Kubernetes_Cluster_State" %}Kubernetes Cluster State{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-k8s_apiserver-Kubernetes_API_Server" %}Kubernetes API Server{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-k8s_kubelet-Kubelet" %}Kubelet{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-k8s_kubeproxy-Kubeproxy" %}Kubeproxy{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Kubernetes_Containers" %}Kubernetes Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn\'t support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **coredns** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                        
                                                                                         |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **coredns**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/coredns.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="All options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:9153/metrics | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **Filters** | [per_server_stats](#option-filters-per-server-stats) | Server filter. |  | no |\n|  | [per_zone_stats](#option-filters-per-zone-stats) | Zone filter. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). 
|  | no |\n\n<a id="option-filters-per-server-stats"></a>\n##### per_server_stats\n\nMetrics of servers matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_server_stats:\n  includes:\n    - pattern1\n    - pattern2\n  excludes:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-filters-per-zone-stats"></a>\n##### per_zone_stats\n\nMetrics of zones matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_zone_stats:\n  includes:\n    - pattern1\n    - pattern2\n  excludes:\n    - pattern3\n    - pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **coredns** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the coredns data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _coredns_ (or scroll the list) to locate the **coredns** collector.\n5. Click the **+** next to the **coredns** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/coredns.conf`.\n\nThe file format is YAML. 
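Before adding a job, you can confirm that the CoreDNS metrics endpoint responds; a quick sanity check, assuming the default Prometheus port used in the options above:\n\n```bash\n# Expect Prometheus-format coredns_* metrics in the output\ncurl -s http://127.0.0.1:9153/metrics | head\n```\n\n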
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/coredns.conf\n```\n\n##### Examples\n\n###### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9153/metrics\n\n```\n{% /details %}\n###### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9153/metrics\n    username: foo\n    password: bar\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9153/metrics\n\n  - name: remote\n    url: http://203.0.113.10:9153/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `coredns` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m coredns\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m coredns -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `coredns` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep coredns\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep coredns /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep coredns\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CoreDNS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.dns_request_count_total | requests | requests/s |\n| coredns.dns_responses_count_total | responses | responses/s |\n| coredns.dns_request_count_total_per_status | processed, dropped | requests/s |\n| coredns.dns_no_matching_zone_dropped_total | dropped | requests/s |\n| coredns.dns_panic_count_total | panics | panics/s |\n| coredns.dns_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.dns_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.dns_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.dns_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n### Per server\n\nThese metrics refer to the DNS server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| server_name | Server name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.server_dns_request_count_total | requests | requests/s |\n| coredns.server_dns_responses_count_total | responses | responses/s |\n| coredns.server_request_count_total_per_status | processed, dropped | requests/s |\n| coredns.server_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.server_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.server_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.server_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n### Per zone\n\nThese metrics refer to the DNS zone.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| zone_name | Zone name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.zone_dns_request_count_total | requests | requests/s |\n| coredns.zone_dns_responses_count_total | responses | responses/s |\n| coredns.zone_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.zone_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.zone_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.zone_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n",integration_type:"collector",id:"go.d.plugin-coredns-CoreDNS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/coredns/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-couchbase",plugin_name:"go.d.plugin",module_name:"couchbase",monitored_instance:{name:"Couchbase",link:"https://www.couchbase.com/",icon_filename:"couchbase.svg",categories:["data-collection.databases"]},keywords:["couchbase","databases"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Couchbase\n\nPlugin: go.d.plugin\nModule: couchbase\n\n## Overview\n\nThis collector monitors Couchbase servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **couchbase** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **couchbase**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/couchbase.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="All options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 5 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8091 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Functions** | functions.top_queries.disabled | Disable the [top-queries](#top-queries) function. | no | no |\n|  | functions.top_queries.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.top_queries.limit | Maximum number of queries to return. | 500 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **couchbase** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the couchbase data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _couchbase_ (or scroll the list) to locate the **couchbase** collector.\n5. Click the **+** next to the **couchbase** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/couchbase.conf`.\n\nThe file format is YAML. 
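\n\nTop-level keys such as `update_every` act as global defaults that every job inherits, and an individual job can override them. A minimal sketch (values are illustrative):\n\n```yaml\nupdate_every: 5        # global default (seconds) for all jobs below\njobs:\n  - name: local\n    url: http://127.0.0.1:8091\n  - name: remote\n    url: http://203.0.113.0:8091\n    update_every: 10   # overrides the global interval for this job only\n```\n\n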
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchbase.conf\n```\n\n##### Examples\n\n###### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8091\n\n```\n{% /details %}\n###### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8091\n    username: foo\n    password: bar\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8091\n\n  - name: remote\n    url: http://203.0.113.0:8091\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `couchbase` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m couchbase\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m couchbase -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `couchbase` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep couchbase\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep couchbase /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
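\n\nIf the log file is large, you can narrow the search to the most recent lines first; a minimal sketch using standard coreutils:\n\n```bash\ntail -n 500 /var/log/netdata/collector.log | grep couchbase\n```\n\n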
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep couchbase\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",functions:"## Functions\n\nThis collector exposes real-time functions for interactive troubleshooting in the Live tab.\n\n\n### Top Queries\n\nRetrieves completed N1QL query statistics from Couchbase [system:completed_requests](https://docs.couchbase.com/server/current/manage/monitor/monitoring-n1ql-query.html#sys-completed-req) keyspace.\n\nThis function queries the `system:completed_requests` keyspace which stores information about recently completed N1QL requests. It provides timing metrics, result statistics, and error/warning counts for each completed query.\n\nUse cases:\n- Identify slow N1QL queries consuming the most elapsed time\n- Find queries with high error or warning counts\n- Analyze query patterns by user to understand workload distribution\n\nStatement text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Couchbase:top-queries` |\n| Require Cloud | yes |\n| Performance | Queries `system:completed_requests` via the N1QL query service:<br/>\u2022 The `completed_requests` keyspace has a configurable size limit (`completed-limit` setting)<br/>\u2022 Default limit of 500 rows balances usefulness with performance |\n| Security | Query text may contain unmasked literal values including potentially sensitive data:<br/>\u2022 Personal information in WHERE clauses or INSERT values<br/>\u2022 Business data embedded in queries<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to Couchbase<br/>\u2022 The N1QL (Query) service is running<br/>\u2022 Returns HTTP 503 if collector is still initializing<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Grant access to `system:completed_requests`\n\nThe user must have appropriate privileges to query system keyspaces and the N1QL service must be available.\n\n1. Ensure the N1QL (Query) service is running on the cluster\n\n2. Grant query system catalog privileges to the monitoring user:\n\n   ```sql\n   GRANT QUERY_SYSTEM_CATALOG TO netdata_user;\n   ```\n\n3. Verify access to `completed_requests`:\n\n   ```sql\n   SELECT * FROM system:completed_requests LIMIT 1;\n   ```\n\n   :::info\n\n   - The `system:completed_requests` keyspace stores recently completed queries based on Couchbase server settings `completed-limit` and `completed-threshold`\n   - Only queries exceeding `completed-threshold` (default 1000ms) are logged to `completed_requests`\n   - Adjust `completed-threshold` in Couchbase Query Settings to capture faster queries if needed\n\n   :::\n\n\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. Options include elapsed time, service time, request time, and result count. Defaults to elapsed time to focus on slowest queries. | yes | elapsedTime |  |\n\n#### Returns\n\nCompleted N1QL request statistics. 
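\n\nConceptually, the returned data resembles what a direct N1QL query against the keyspace would yield; an approximate sketch (the exact fields and ordering used by the function may differ):\n\n```sql\nSELECT requestId, requestTime, statement,\n       elapsedTime, serviceTime, resultCount\nFROM system:completed_requests\nORDER BY STR_TO_DURATION(elapsedTime) DESC\nLIMIT 500;\n```\n\n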
Each row represents a single completed query with its timing and result metrics.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Request ID | string |  | hidden | Unique identifier for the N1QL request. Can be used for correlation with Couchbase logs. |\n| Request Time | timestamp |  |  | Timestamp when the request was received by the query service. |\n| Statement | string |  |  | The N1QL statement that was executed. Truncated to 4096 characters. |\n| Elapsed Time | duration | milliseconds |  | Total time from request receipt to response completion, including queue time, planning, execution, and result streaming. |\n| Service Time | duration | milliseconds |  | Time spent actively processing the request, excluding network latency and queue wait time. Compare with elapsed time to identify network or queueing delays. |\n| Result Count | integer |  |  | Number of documents/rows returned by the query. High values may indicate queries returning excessive data. |\n| Result Size | integer |  | hidden | Total size of the result set in bytes. Large result sizes may indicate inefficient queries or missing projections. |\n| Error Count | integer |  | hidden | Number of errors encountered during query execution. Non-zero values require investigation. |\n| Warning Count | integer |  | hidden | Number of warnings generated during query execution. Warnings may indicate suboptimal query patterns or index usage. |\n| User | string |  |  | Couchbase user who executed the query. Useful for identifying workload by user or application. |\n| Client Context ID | string |  | hidden | Client-provided context identifier for request tracking and correlation. |\n\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Couchbase instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| couchbase.bucket_quota_percent_used | a dimension per bucket | percentage |\n| couchbase.bucket_ops_per_sec | a dimension per bucket | ops/s |\n| couchbase.bucket_disk_fetches | a dimension per bucket | fetches |\n| couchbase.bucket_item_count | a dimension per bucket | items |\n| couchbase.bucket_disk_used_stats | a dimension per bucket | bytes |\n| couchbase.bucket_data_used | a dimension per bucket | bytes |\n| couchbase.bucket_mem_used | a dimension per bucket | bytes |\n| couchbase.bucket_vb_active_num_non_resident | a dimension per bucket | items |\n\n",integration_type:"collector",id:"go.d.plugin-couchbase-Couchbase",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/couchbase/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-couchdb",plugin_name:"go.d.plugin",module_name:"couchdb",monitored_instance:{name:"CouchDB",link:"https://couchdb.apache.org/",icon_filename:"couchdb.svg",categories:["data-collection.databases"]},keywords:["couchdb","databases"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# CouchDB\n\nPlugin: go.d.plugin\nModule: couchdb\n\n## Overview\n\nThis collector monitors CouchDB servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **couchdb** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **couchdb**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/couchdb.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:5984 | yes |\n|  | node | CouchDB node name (same as the `-name` argument in `vm.args`). | _local | yes |\n|  | timeout | HTTP request timeout (seconds). | 2 | no |\n| **Filters** | databases | Space-separated list of databases to collect stats for. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **couchdb** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the couchdb data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _couchdb_ (or scroll the list) to locate the **couchdb** collector.\n5. Click the **+** next to the **couchdb** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/couchdb.conf`.\n\nThe file format is YAML. 
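\n\nWhen editing the file, any option from the table above can go under a job entry; for instance, token-based authentication instead of username and password. A minimal sketch (the token file path is hypothetical):\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:5984\n    bearer_token_file: /etc/netdata/couchdb.token   # hypothetical path\n```\n\n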
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchdb.conf\n```\n\n##### Examples\n\n###### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:5984\n\n```\n{% /details %}\n###### Basic HTTP auth\n\nLocal server with basic HTTP authentication, node name, and multiple databases defined. Make sure to match the node name with the `NODENAME` value in your CouchDB\'s `etc/vm.args` file. Typically, this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` for a single-node server.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:5984\n    node: couchdb@127.0.0.1\n    databases: my-db other-db\n    username: foo\n    password: bar\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:5984\n\n  - name: remote\n    url: http://203.0.113.0:5984\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `couchdb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m couchdb\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m couchdb -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `couchdb` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep couchdb\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep couchdb /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
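\n\nOn long-running systems the file can grow large; a minimal sketch that checks only the most recent lines (standard coreutils):\n\n```bash\ntail -n 500 /var/log/netdata/collector.log | grep couchdb\n```\n\n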
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep couchdb\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CouchDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| couchdb.activity | db_reads, db_writes, view_reads | requests/s |\n| couchdb.request_methods | copy, delete, get, head, options, post, put | requests/s |\n| couchdb.response_codes | 200, 201, 202, 204, 206, 301, 302, 304, 400, 401, 403, 404, 406, 409, 412, 413, 414, 415, 416, 417, 500, 501, 503 | responses/s |\n| couchdb.response_code_classes | 2xx, 3xx, 4xx, 5xx | responses/s |\n| couchdb.active_tasks | indexer, db_compaction, replication, view_compaction | tasks |\n| couchdb.replicator_jobs | running, pending, crashed, internal_replication_jobs | jobs |\n| couchdb.open_files | files | files |\n| couchdb.erlang_vm_memory | atom, binaries, code, ets, procs, other | B |\n| couchdb.proccounts | os_procs, erl_procs | processes |\n| couchdb.peakmsgqueue | peak_size | messages |\n| couchdb.reductions | reductions | reductions |\n| couchdb.db_sizes_file | a dimension per database | KiB |\n| couchdb.db_sizes_external | a dimension per database | KiB |\n| couchdb.db_sizes_active | a dimension per database | KiB |\n| couchdb.db_doc_count | a dimension per database | docs |\n| couchdb.db_doc_del_count | a dimension per database | docs |\n\n",integration_type:"collector",id:"go.d.plugin-couchdb-CouchDB",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/couchdb/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-dcgm",plugin_name:"go.d.plugin",module_name:"dcgm",monitored_instance:{name:"Nvidia Data Center GPU Manager (DCGM)",link:"https://github.com/NVIDIA/dcgm-exporter",icon_filename:"nvidia.svg",categories:["data-collection.hardware-and-sensors"]},keywords:["nvidia","gpu","dcgm","dcgm-exporter"],related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"nvidia_smi"}]}},info_provided_to_referring_integrations:{description:""}},overview:'# Nvidia Data Center GPU Manager (DCGM)\n\nPlugin: go.d.plugin\nModule: dcgm\n\n## Overview\n\nThis collector gathers NVIDIA GPU telemetry from a `dcgm-exporter` endpoint.\nIt supports all numeric fields exposed by the exporter and maps them into Netdata-native contexts.\n\n\nIt collects metrics by periodically scraping the exporter Prometheus endpoint over HTTP.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nNvidia Data Center GPU Manager (DCGM) can be monitored further using the following other integrations:\n\n- {% relatedResource id="go.d.plugin-nvidia_smi-Nvidia_GPU" %}Nvidia GPU{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration does not support auto-detection in v1.\n\n#### Limits\n\nThe collector applies global and per-metric time series limits to prevent excessive cardinality.\n\n\n#### Performance 
Impact\n\nThe impact depends on dcgm-exporter field selection and resulting series cardinality.\n',setup:'## Setup\n\n\nYou can configure the **dcgm** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **dcgm**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/dcgm.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Run dcgm-exporter\n\nInstall DCGM and run `dcgm-exporter` so that a Prometheus endpoint is available (default `:9400/metrics`).\n\n#### Configure exporter field list\n\nThe default exporter profile exposes a small subset of fields.\nUse the Netdata-recommended profile:\n[`dcgm-exporter-netdata.csv`](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/dcgm/dcgm-exporter-netdata.csv)\n(raw download: `https://raw.githubusercontent.com/netdata/netdata/master/src/go/plugin/go.d/collector/dcgm/dcgm-exporter-netdata.csv`).\n\nThe Netdata profile enables 127 fields by default and documents all remaining known DCGM fields as commented entries.\nTo customize beyond the baseline, uncomment the field you need and comment out one currently enabled field in its place.\n\nRuntime validation artifacts:\n`src/go/plugin/go.d/collector/dcgm/runtime-validation-driver-590.48.01-dcgm-exporter-4.4.1-4.5.2.md`\nand\n`src/go/plugin/go.d/collector/dcgm/runtime-validation-driver-590.48.01-dcgm-exporter-4.4.1-4.5.2.json`\n\nValidation is primarily version-scoped (NVIDIA driver + DCGM/DCGM-exporter versions), so treat it as a strong baseline rather than a guarantee of universal compatibility.\n\nExample:\n`dcgm-exporter -f /path/to/dcgm-exporter-netdata.csv`\n\n\n#### Keep collection intervals aligned\n\nSet Netdata `update_every` to the same value as the dcgm-exporter collection interval (default 30 seconds).\nFor example, run the exporter with `dcgm-exporter -c 30000` (interval in milliseconds) and set Netdata `update_every: 30`.\n\n\n#### Enable profiling capabilities (optional)\n\nProfiling fields may require additional privileges/capabilities in your runtime environment.\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). Keep this aligned with the dcgm-exporter collection interval. | 30 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | DCGM exporter metrics endpoint URL. 
| http://127.0.0.1:9400/metrics | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Limits** | max_time_series | Global time series limit. If exceeded, collection is skipped for this cycle. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics above this limit are skipped. | 200 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token. |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy authentication. |  | no |\n|  | proxy_password | Password for proxy authentication. |  | no |\n| **Request** | headers | Additional HTTP headers to include in the request. |  | no |\n|  | method | HTTP method. | GET | no |\n|  | body | HTTP request body. |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associate this job with a Virtual Node. |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **dcgm** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the dcgm data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _dcgm_ (or scroll the list) to locate the **dcgm** collector.\n5. Click the **+** next to the **dcgm** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/dcgm.conf`.\n\nThe file format is YAML. 
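\n\nWhen editing the file, keep `update_every` aligned with the exporter interval described in the prerequisites; a minimal sketch (values are illustrative):\n\n```yaml\nupdate_every: 30   # matches dcgm-exporter -c 30000 (milliseconds)\njobs:\n  - name: local\n    url: http://127.0.0.1:9400/metrics\n```\n\n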
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dcgm.conf\n```\n\n##### Examples\n\n###### Local exporter\n\nCollect metrics from a local dcgm-exporter endpoint.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9400/metrics\n    update_every: 30\n\n```\n{% /details %}\n###### TLS endpoint\n\nCollect metrics over HTTPS with a custom CA certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: secure\n    url: https://dcgm-exporter.example.com:9400/metrics\n    update_every: 30\n    tls_ca: /etc/netdata/certs/dcgm-ca.crt\n\n```\n{% /details %}\n###### Increased cardinality limits\n\nIncrease limits when collecting large field sets and multiple entities.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: dcgm_large\n    url: http://127.0.0.1:9400/metrics\n    update_every: 30\n    max_time_series: 10000\n    max_time_series_per_metric: 2000\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dcgm` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m dcgm\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m dcgm -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dcgm` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dcgm\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep dcgm /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
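\n\nTo focus on recent activity, you can limit the search to the newest lines; a minimal sketch (standard coreutils):\n\n```bash\ntail -n 500 /var/log/netdata/collector.log | grep dcgm\n```\n\n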
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dcgm\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ dcgm_gpu_xid_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dcgm.conf) | dcgm.gpu.reliability.xid | NVIDIA driver reported GPU XID error on GPU ${label:gpu} |\n| [ dcgm_gpu_row_remap_failure ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dcgm.conf) | dcgm.gpu.reliability.row_remap_status | GPU row remapping failed on GPU ${label:gpu} |\n| [ dcgm_gpu_uncorrectable_remapped_rows ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dcgm.conf) | dcgm.gpu.reliability.row_remap_events | Uncorrectable remapped rows increased on GPU ${label:gpu} |\n| [ dcgm_gpu_power_violation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dcgm.conf) | dcgm.gpu.throttle.violations | Power throttling detected on GPU ${label:gpu} |\n| [ dcgm_gpu_thermal_violation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dcgm.conf) | dcgm.gpu.throttle.violations | Thermal throttling detected on GPU ${label:gpu} |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMetrics are grouped into static Netdata contexts. Contexts are created only when matching DCGM fields are present in the exporter output.\n\n\n### Per gpu\n\nThese metrics refer to GPU device instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| gpu | gpu label from exporter metrics. |\n| uuid | uuid label from exporter metrics. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dcgm.gpu.capability.support | cc_mode, cuda_compute_capability, gpm_support, mig_attributes, mig_ci_info, mig_gi_info, mig_max_slices, supported_clocks, supported_type_info | state |\n| dcgm.gpu.clock.frequency | app_mem_clock, app_sm_clock, max_mem_clock, max_sm_clock, max_video_clock, memory, sm, video_clock | MHz |\n| dcgm.gpu.compute.activity | dram, fp16, fp32, fp64, graphics_engine_active, integer, sm_active, sm_occupancy, tensor | % |\n| dcgm.gpu.compute.tensor.activity | tensor_dfma, tensor_hmma, tensor_imma | % |\n| dcgm.gpu.compute.media.activity | nvdec0_active, nvdec1_active, nvdec2_active, nvdec3_active, nvdec4_active, nvdec5_active, nvdec6_active, nvdec7_active, nvjpg0_active, nvjpg1_active, nvjpg2_active, nvjpg3_active, nvjpg4_active, nvjpg5_active, nvjpg6_active, nvjpg7_active, nvofa0_active, nvofa1_active | % |\n| dcgm.gpu.compute.cache.activity | hostmem_cache_hit, hostmem_cache_miss, peermem_cache_hit, peermem_cache_miss | events/s |\n| dcgm.gpu.compute.utilization | decoder, encoder, gpu, memory_copy | % |\n| dcgm.gpu.cpu.power | module_power_util_current, sysio_power_util_current | Watts |\n| dcgm.gpu.cpu.info | cpu_model, cpu_vendor | value |\n| dcgm.gpu.diagnostics.results | diag_diagnostic_result, diag_eud_result, diag_memory_bandwidth_result, diag_memory_result, diag_memtest_result, diag_nccl_tests_result, diag_nvbandwidth_result, diag_pulse_test_result, diag_software_result, diag_targeted_power_result, diag_targeted_stress_result | state |\n| dcgm.gpu.diagnostics.status | diag_status | state |\n| dcgm.gpu.health.status | imex_daemon_status, imex_domain_status | state |\n| dcgm.gpu.interconnect.connectx.error_status | connectx_correctable_err_mask, connectx_correctable_err_status, connectx_uncorrectable_err_mask, connectx_uncorrectable_err_severity, connectx_uncorrectable_err_status | state |\n| dcgm.gpu.interconnect.connectx.errors | connectx_correctable_err_mask, connectx_correctable_err_status, connectx_uncorrectable_err_mask, connectx_uncorrectable_err_severity, connectx_uncorrectable_err_status | errors/s |\n| dcgm.gpu.interconnect.connectx.link | connectx_active_pcie_link_speed, connectx_expect_pcie_link_speed | value |\n| dcgm.gpu.interconnect.connectx.status | connectx_health | state |\n| dcgm.gpu.interconnect.error_rate | c2c_link_error_intr, c2c_link_error_replay, c2c_link_error_replay_b2b | errors/s |\n| dcgm.gpu.interconnect.fabric | fabric_clique_id, fabric_cluster_uuid, fabric_health_mask, fabric_manager_error_code, fabric_manager_status | state |\n| dcgm.gpu.interconnect.nvlink.error_rate | gpu_nvlink_errors | errors/s |\n| dcgm.gpu.interconnect.pcie.error_rate | pcie_count_correctable_errors, pcie_replay | errors/s |\n| dcgm.gpu.interconnect.pcie.link.generation | link_gen, max_link_gen | generation |\n| dcgm.gpu.interconnect.pcie.link.width | connectx_active_pcie_link_width, connectx_expect_pcie_link_width, link_width, max_link_width | lanes |\n| dcgm.gpu.interconnect.state | c2c_link, c2c_link_power_state, c2c_link_status | state |\n| dcgm.gpu.interconnect.pcie.state | diag_pcie_result | state |\n| dcgm.gpu.interconnect.throughput | c2c_max_bandwidth, c2c_rx_all_bytes, c2c_rx_data_bytes, c2c_tx_all_bytes, c2c_tx_data_bytes | B/s |\n| dcgm.gpu.interconnect.pcie.throughput | pcie_rx, pcie_rx_throughput, pcie_tx, pcie_tx_throughput | B/s |\n| dcgm.gpu.interconnect.nvlink.throughput | nvlink_rx, nvlink_tx | B/s |\n| dcgm.gpu.interconnect.total.throughput | pcie, nvlink | 
B/s |\n| dcgm.gpu.internal.boundary | first_connectx_field_id, first_vgpu_field_id, internal_fields_0_end, internal_fields_0_start, last_connectx_field_id, last_vgpu_field_id | state |\n| dcgm.gpu.inventory.identity | brand, count, cuda_visible_devices_str, minor_number, name, nvml_index, serial, uuid | value |\n| dcgm.gpu.inventory.platform | platform_chassis_serial_number, platform_chassis_slot_number, platform_host_id, platform_infiniband_guid, platform_module_id, platform_peer_type, platform_tray_index | value |\n| dcgm.gpu.inventory.software | inforom_config_check, inforom_config_valid, inforom_image_ver, oem_inforom_ver, power_inforom_ver, process_name, vbios_version | value |\n| dcgm.gpu.memory.bar1_usage | free, used | B |\n| dcgm.gpu.memory.bar1_capacity | total | B |\n| dcgm.gpu.memory.ecc_error_rate | ecc_current, ecc_dbe_agg, ecc_dbe_agg_cbu, ecc_dbe_agg_dev, ecc_dbe_agg_l1, ecc_dbe_agg_l2, ecc_dbe_agg_reg, ecc_dbe_agg_shm, ecc_dbe_agg_srm, ecc_dbe_agg_tex, ecc_dbe_vol, ecc_dbe_vol_cbu, ecc_dbe_vol_dev, ecc_dbe_vol_l1, ecc_dbe_vol_l2, ecc_dbe_vol_reg, ecc_dbe_vol_shm, ecc_dbe_vol_srm, ecc_dbe_vol_tex, ecc_pending, ecc_sbe_agg, ecc_sbe_agg_cbu, ecc_sbe_agg_dev, ecc_sbe_agg_l1, ecc_sbe_agg_l2, ecc_sbe_agg_reg, ecc_sbe_agg_shm, ecc_sbe_agg_srm, ecc_sbe_agg_tex, ecc_sbe_vol, ecc_sbe_vol_cbu, ecc_sbe_vol_dev, ecc_sbe_vol_l1, ecc_sbe_vol_l2, ecc_sbe_vol_reg, ecc_sbe_vol_shm, ecc_sbe_vol_srm, ecc_sbe_vol_tex | errors/s |\n| dcgm.gpu.memory.ecc_errors | ecc_current, ecc_dbe_agg_cbu, ecc_dbe_agg_dev, ecc_dbe_agg_l1, ecc_dbe_agg_l2, ecc_dbe_agg_reg, ecc_dbe_agg_shm, ecc_dbe_agg_srm, ecc_dbe_agg_tex, ecc_dbe_vol_cbu, ecc_dbe_vol_dev, ecc_dbe_vol_l1, ecc_dbe_vol_l2, ecc_dbe_vol_reg, ecc_dbe_vol_shm, ecc_dbe_vol_srm, ecc_dbe_vol_tex, ecc_inforom_ver, ecc_pending, ecc_sbe_agg_cbu, ecc_sbe_agg_dev, ecc_sbe_agg_l1, ecc_sbe_agg_l2, ecc_sbe_agg_reg, ecc_sbe_agg_shm, ecc_sbe_agg_srm, ecc_sbe_agg_tex, ecc_sbe_vol_cbu, ecc_sbe_vol_dev, ecc_sbe_vol_l1, ecc_sbe_vol_l2, ecc_sbe_vol_reg, ecc_sbe_vol_shm, ecc_sbe_vol_srm, ecc_sbe_vol_tex | errors |\n| dcgm.gpu.memory.page_retirements | retired_dbe, retired_pending, retired_sbe | pages/s |\n| dcgm.gpu.memory.usage | free, reserved, used | B |\n| dcgm.gpu.memory.capacity | total | B |\n| dcgm.gpu.memory.utilization | used_percent | % |\n| dcgm.gpu.power.energy | total | mJ/s |\n| dcgm.gpu.power.profiles | enforced_power_profile_mask, requested_power_profile_mask, valid_power_profile_mask | state |\n| dcgm.gpu.power.smoothing | pwr_smoothing_active_preset_profile, pwr_smoothing_admin_override_percent_tmp_floor, pwr_smoothing_admin_override_ramp_down_hyst_val, pwr_smoothing_admin_override_ramp_down_rate, pwr_smoothing_admin_override_ramp_up_rate, pwr_smoothing_applied_tmp_ceil, pwr_smoothing_applied_tmp_floor, pwr_smoothing_enabled, pwr_smoothing_hw_circuitry_percent_lifetime_remaining, pwr_smoothing_imm_ramp_down_enabled, pwr_smoothing_max_num_preset_profiles, pwr_smoothing_max_percent_tmp_floor_setting, pwr_smoothing_min_percent_tmp_floor_setting, pwr_smoothing_priv_lvl, pwr_smoothing_profile_percent_tmp_floor, pwr_smoothing_profile_ramp_down_hyst_val, pwr_smoothing_profile_ramp_down_rate, pwr_smoothing_profile_ramp_up_rate | value |\n| dcgm.gpu.power.usage | draw, enforced_limit, power_mgmt_limit, power_mgmt_limit_def, power_mgmt_limit_max, power_mgmt_limit_min, power_usage_instant | Watts |\n| dcgm.gpu.reliability.memory_health | banks_remap_rows_avail_high, banks_remap_rows_avail_low, banks_remap_rows_avail_max, banks_remap_rows_avail_none, 
banks_remap_rows_avail_partial, memory_unrepairable_flag, threshold_srm | state |\n| dcgm.gpu.reliability.recovery_action | get_gpu_recovery_action | state |\n| dcgm.gpu.reliability.row_remap_events | correctable_remapped_rows, uncorrectable_remapped_rows | rows/s |\n| dcgm.gpu.reliability.row_remap_status | row_remap_failure, row_remap_pending | state |\n| dcgm.gpu.reliability.xid | xid | code |\n| dcgm.gpu.state.configuration | autoboost, compute_mode, persistence_mode, sync_boost, sync_boost_violation | state |\n| dcgm.gpu.state.performance | pstate | state |\n| dcgm.gpu.state.virtualization | mig_mode, virtual_mode | state |\n| dcgm.gpu.thermal.fan_speed | fan_speed | % |\n| dcgm.gpu.thermal.temperature | connectx_device_temperature, gpu, gpu_max_op_temp, gpu_temp_limit, mem_max_op_temp, memory, shutdown_temp, slowdown_temp | Celsius |\n| dcgm.gpu.throttle.reasons | clocks_event_reasons | bitmask |\n| dcgm.gpu.throttle.violations | board_limit_violation, hw_power_brake_slowdown, hw_therm_slowdown, low_utilization_violation, power_violation, reliability_violation, sw_power_cap, sw_therm_slowdown, sync_boost, thermal_violation, total_app_clocks_violation, total_base_clocks_violation | milliseconds/s |\n| dcgm.gpu.topology.affinity | cpu_affinity_0, cpu_affinity_1, cpu_affinity_2, cpu_affinity_3, gpu_topology_affinity, gpu_topology_pci, mem_affinity_0, mem_affinity_1, mem_affinity_2, mem_affinity_3, pci_busid, pci_combined_id, pci_subsys_id | value |\n| dcgm.gpu.virtualization.vgpu.frame_rate | vgpu_frame_rate_limit | fps |\n| dcgm.gpu.virtualization.vgpu.instance | vgpu_instance_ids, vgpu_pci_id, vgpu_uuid | value |\n| dcgm.gpu.virtualization.vgpu.license | vgpu_instance_license_state, vgpu_license_status, vgpu_type_license | state |\n| dcgm.gpu.virtualization.vgpu.memory | vgpu_memory_usage | B |\n| dcgm.gpu.virtualization.vgpu.sessions | vgpu_enc_sessions_info, vgpu_enc_stats, vgpu_fbc_sessions_info, vgpu_fbc_stats | value |\n| dcgm.gpu.virtualization.vgpu.software | vgpu_driver_version | value |\n| dcgm.gpu.virtualization.vgpu.type | creatable_vgpu_type_ids, supported_vgpu_type_ids, vgpu_type, vgpu_type_class, vgpu_type_info, vgpu_type_name | value |\n| dcgm.gpu.virtualization.vgpu.utilization | vgpu_per_process_utilization | % |\n| dcgm.gpu.virtualization.vgpu.vm | vgpu_vm_gpu_instance_id, vgpu_vm_id, vgpu_vm_name | value |\n| dcgm.gpu.workload.sessions | accounting_data, enc_stats, fbc_sessions_info, fbc_stats | value |\n\n### Per mig\n\nThese metrics refer to MIG instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| gpu | gpu label from exporter metrics. |\n| gpu_i_id | gpu_i_id label from exporter metrics. |\n| gpu_i_profile | gpu_i_profile label from exporter metrics. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dcgm.mig.clock.frequency | app_mem_clock, app_sm_clock, max_mem_clock, max_sm_clock, max_video_clock, memory, sm, video_clock | MHz |\n| dcgm.mig.compute.activity | dram, fp16, fp32, fp64, graphics_engine_active, integer, sm_active, sm_occupancy, tensor | % |\n| dcgm.mig.compute.tensor.activity | tensor_dfma, tensor_hmma, tensor_imma | % |\n| dcgm.mig.compute.media.activity | nvdec0_active, nvdec1_active, nvdec2_active, nvdec3_active, nvdec4_active, nvdec5_active, nvdec6_active, nvdec7_active, nvjpg0_active, nvjpg1_active, nvjpg2_active, nvjpg3_active, nvjpg4_active, nvjpg5_active, nvjpg6_active, nvjpg7_active, nvofa0_active, nvofa1_active | % |\n| dcgm.mig.compute.cache.activity | hostmem_cache_hit, hostmem_cache_miss, peermem_cache_hit, peermem_cache_miss | events/s |\n| dcgm.mig.compute.utilization | decoder, encoder, gpu, memory_copy | % |\n| dcgm.mig.interconnect.nvlink.ber | nvlink_count_effective_ber, nvlink_count_effective_ber_float, nvlink_count_symbol_ber, nvlink_count_symbol_ber_float | ratio |\n| dcgm.mig.interconnect.nvlink.congestion | nvlink_ppcnt_ibpc_port_xmit_wait | events/s |\n| dcgm.mig.interconnect.error_rate | c2c_link_error_intr, c2c_link_error_replay, c2c_link_error_replay_b2b | errors/s |\n| dcgm.mig.interconnect.nvlink.error_rate | gpu_nvlink_errors, nvlink_count_effective_errors, nvlink_count_fec_history_0, nvlink_count_fec_history_1, nvlink_count_fec_history_10, nvlink_count_fec_history_11, nvlink_count_fec_history_12, nvlink_count_fec_history_13, nvlink_count_fec_history_14, nvlink_count_fec_history_15, nvlink_count_fec_history_2, nvlink_count_fec_history_3, nvlink_count_fec_history_4, nvlink_count_fec_history_5, nvlink_count_fec_history_6, nvlink_count_fec_history_7, nvlink_count_fec_history_8, nvlink_count_fec_history_9, nvlink_count_link_recovery_events, nvlink_count_link_recovery_failed_events, nvlink_count_link_recovery_successful_events, nvlink_count_local_link_integrity_errors, nvlink_count_rx_buffer_overrun_errors, nvlink_count_rx_errors, nvlink_count_rx_general_errors, nvlink_count_rx_malformed_packet_errors, nvlink_count_rx_remote_errors, nvlink_count_rx_symbol_errors, nvlink_count_tx_discards, nvlink_crc_data_error, nvlink_crc_data_error_count_l0, nvlink_crc_data_error_count_l1, nvlink_crc_data_error_count_l10, nvlink_crc_data_error_count_l11, nvlink_crc_data_error_count_l12, nvlink_crc_data_error_count_l13, nvlink_crc_data_error_count_l14, nvlink_crc_data_error_count_l15, nvlink_crc_data_error_count_l16, nvlink_crc_data_error_count_l17, nvlink_crc_data_error_count_l2, nvlink_crc_data_error_count_l3, nvlink_crc_data_error_count_l4, nvlink_crc_data_error_count_l5, nvlink_crc_data_error_count_l6, nvlink_crc_data_error_count_l7, nvlink_crc_data_error_count_l8, nvlink_crc_data_error_count_l9, nvlink_crc_flit_error, nvlink_crc_flit_error_count_l0, nvlink_crc_flit_error_count_l1, nvlink_crc_flit_error_count_l10, nvlink_crc_flit_error_count_l11, nvlink_crc_flit_error_count_l12, nvlink_crc_flit_error_count_l13, nvlink_crc_flit_error_count_l14, nvlink_crc_flit_error_count_l15, nvlink_crc_flit_error_count_l16, nvlink_crc_flit_error_count_l17, nvlink_crc_flit_error_count_l2, nvlink_crc_flit_error_count_l3, nvlink_crc_flit_error_count_l4, nvlink_crc_flit_error_count_l5, nvlink_crc_flit_error_count_l6, nvlink_crc_flit_error_count_l7, nvlink_crc_flit_error_count_l8, nvlink_crc_flit_error_count_l9, nvlink_error_dl_crc, nvlink_error_dl_recovery, nvlink_error_dl_replay, 
nvlink_ppcnt_physical_successful_recovery_events, nvlink_ppcnt_plr_rcv_uncorrectable_code, nvlink_ppcnt_recovery_time_since_last, nvlink_ppcnt_recovery_total_successful_events, nvlink_pprm_oper_recovery, nvlink_recovery_error, nvlink_recovery_error_count_l0, nvlink_recovery_error_count_l1, nvlink_recovery_error_count_l10, nvlink_recovery_error_count_l11, nvlink_recovery_error_count_l12, nvlink_recovery_error_count_l13, nvlink_recovery_error_count_l14, nvlink_recovery_error_count_l15, nvlink_recovery_error_count_l16, nvlink_recovery_error_count_l17, nvlink_recovery_error_count_l2, nvlink_recovery_error_count_l3, nvlink_recovery_error_count_l4, nvlink_recovery_error_count_l5, nvlink_recovery_error_count_l6, nvlink_recovery_error_count_l7, nvlink_recovery_error_count_l8, nvlink_recovery_error_count_l9, nvlink_replay_error, nvlink_replay_error_count_l0, nvlink_replay_error_count_l1, nvlink_replay_error_count_l10, nvlink_replay_error_count_l11, nvlink_replay_error_count_l12, nvlink_replay_error_count_l13, nvlink_replay_error_count_l14, nvlink_replay_error_count_l15, nvlink_replay_error_count_l16, nvlink_replay_error_count_l17, nvlink_replay_error_count_l2, nvlink_replay_error_count_l3, nvlink_replay_error_count_l4, nvlink_replay_error_count_l5, nvlink_replay_error_count_l6, nvlink_replay_error_count_l7, nvlink_replay_error_count_l8, nvlink_replay_error_count_l9 | errors/s |\n| dcgm.mig.interconnect.pcie.error_rate | pcie_count_correctable_errors, pcie_replay | errors/s |\n| dcgm.mig.interconnect.nvlink.errors | nvlink_ppcnt_plr_rcv_uncorrectable_code | errors |\n| dcgm.mig.interconnect.fabric | fabric_clique_id, fabric_cluster_uuid, fabric_health_mask, fabric_manager_error_code, fabric_manager_status | state |\n| dcgm.mig.interconnect.pcie.link.generation | link_gen, max_link_gen | generation |\n| dcgm.mig.interconnect.pcie.link.width | link_width, max_link_width | lanes |\n| dcgm.mig.interconnect.state | c2c_link, c2c_link_power_state, c2c_link_status | state |\n| dcgm.mig.interconnect.pcie.state | diag_pcie_result | state |\n| dcgm.mig.interconnect.nvlink.state | gpu_topology_nvlink, nvlink_get_state, nvlink_ppcnt_physical_link_down_counter, nvlink_ppcnt_plr_rcv_code_err, nvlink_ppcnt_plr_sync_events, nvlink_ppcnt_plr_xmit_retry_events, p2p_nvlink_status | state |\n| dcgm.mig.interconnect.throughput | c2c_max_bandwidth, c2c_rx_all_bytes, c2c_rx_data_bytes, c2c_tx_all_bytes, c2c_tx_data_bytes | B/s |\n| dcgm.mig.interconnect.nvlink.throughput | nvlink_bandwidth_l0, nvlink_bandwidth_l1, nvlink_bandwidth_l10, nvlink_bandwidth_l11, nvlink_bandwidth_l12, nvlink_bandwidth_l13, nvlink_bandwidth_l14, nvlink_bandwidth_l15, nvlink_bandwidth_l16, nvlink_bandwidth_l17, nvlink_bandwidth_l2, nvlink_bandwidth_l3, nvlink_bandwidth_l4, nvlink_bandwidth_l5, nvlink_bandwidth_l6, nvlink_bandwidth_l7, nvlink_bandwidth_l8, nvlink_bandwidth_l9, nvlink_count_rx, nvlink_count_tx, nvlink_l0_rx, nvlink_l0_tx, nvlink_l10_rx, nvlink_l10_tx, nvlink_l11_rx, nvlink_l11_tx, nvlink_l12_rx, nvlink_l12_tx, nvlink_l13_rx, nvlink_l13_tx, nvlink_l14_rx, nvlink_l14_tx, nvlink_l15_rx, nvlink_l15_tx, nvlink_l16_rx, nvlink_l16_tx, nvlink_l17_rx, nvlink_l17_tx, nvlink_l1_rx, nvlink_l1_tx, nvlink_l2_rx, nvlink_l2_tx, nvlink_l3_rx, nvlink_l3_tx, nvlink_l4_rx, nvlink_l4_tx, nvlink_l5_rx, nvlink_l5_tx, nvlink_l6_rx, nvlink_l6_tx, nvlink_l7_rx, nvlink_l7_tx, nvlink_l8_rx, nvlink_l8_tx, nvlink_l9_rx, nvlink_l9_tx, nvlink_rx_bandwidth, nvlink_rx_bandwidth_l0, nvlink_rx_bandwidth_l1, nvlink_rx_bandwidth_l10, nvlink_rx_bandwidth_l11, 
nvlink_rx_bandwidth_l12, nvlink_rx_bandwidth_l13, nvlink_rx_bandwidth_l14, nvlink_rx_bandwidth_l15, nvlink_rx_bandwidth_l16, nvlink_rx_bandwidth_l17, nvlink_rx_bandwidth_l2, nvlink_rx_bandwidth_l3, nvlink_rx_bandwidth_l4, nvlink_rx_bandwidth_l5, nvlink_rx_bandwidth_l6, nvlink_rx_bandwidth_l7, nvlink_rx_bandwidth_l8, nvlink_rx_bandwidth_l9, nvlink_rx, nvlink_tx_bandwidth, nvlink_tx_bandwidth_l0, nvlink_tx_bandwidth_l1, nvlink_tx_bandwidth_l10, nvlink_tx_bandwidth_l11, nvlink_tx_bandwidth_l12, nvlink_tx_bandwidth_l13, nvlink_tx_bandwidth_l14, nvlink_tx_bandwidth_l15, nvlink_tx_bandwidth_l16, nvlink_tx_bandwidth_l17, nvlink_tx_bandwidth_l2, nvlink_tx_bandwidth_l3, nvlink_tx_bandwidth_l4, nvlink_tx_bandwidth_l5, nvlink_tx_bandwidth_l6, nvlink_tx_bandwidth_l7, nvlink_tx_bandwidth_l8, nvlink_tx_bandwidth_l9, nvlink_tx | B/s |\n| dcgm.mig.interconnect.pcie.throughput | pcie_rx, pcie_rx_throughput, pcie_tx, pcie_tx_throughput | B/s |\n| dcgm.mig.interconnect.total.throughput | pcie, nvlink | B/s |\n| dcgm.mig.interconnect.nvlink.traffic | nvlink_count_rx_packets, nvlink_count_tx_packets, nvlink_ppcnt_plr_rcv_codes, nvlink_ppcnt_plr_xmit_codes, nvlink_ppcnt_plr_xmit_retry_codes | events/s |\n| dcgm.mig.memory.bar1_usage | free, used | B |\n| dcgm.mig.memory.bar1_capacity | total | B |\n| dcgm.mig.memory.ecc_error_rate | ecc_current, ecc_dbe_agg, ecc_dbe_agg_cbu, ecc_dbe_agg_dev, ecc_dbe_agg_l1, ecc_dbe_agg_l2, ecc_dbe_agg_reg, ecc_dbe_agg_shm, ecc_dbe_agg_srm, ecc_dbe_agg_tex, ecc_dbe_vol, ecc_dbe_vol_cbu, ecc_dbe_vol_dev, ecc_dbe_vol_l1, ecc_dbe_vol_l2, ecc_dbe_vol_reg, ecc_dbe_vol_shm, ecc_dbe_vol_srm, ecc_dbe_vol_tex, ecc_pending, ecc_sbe_agg, ecc_sbe_agg_cbu, ecc_sbe_agg_dev, ecc_sbe_agg_l1, ecc_sbe_agg_l2, ecc_sbe_agg_reg, ecc_sbe_agg_shm, ecc_sbe_agg_srm, ecc_sbe_agg_tex, ecc_sbe_vol, ecc_sbe_vol_cbu, ecc_sbe_vol_dev, ecc_sbe_vol_l1, ecc_sbe_vol_l2, ecc_sbe_vol_reg, ecc_sbe_vol_shm, ecc_sbe_vol_srm, ecc_sbe_vol_tex, nvlink_ecc_data_error | errors/s |\n| dcgm.mig.memory.ecc_errors | ecc_current, ecc_dbe_agg_cbu, ecc_dbe_agg_dev, ecc_dbe_agg_l1, ecc_dbe_agg_l2, ecc_dbe_agg_reg, ecc_dbe_agg_shm, ecc_dbe_agg_srm, ecc_dbe_agg_tex, ecc_dbe_vol_cbu, ecc_dbe_vol_dev, ecc_dbe_vol_l1, ecc_dbe_vol_l2, ecc_dbe_vol_reg, ecc_dbe_vol_shm, ecc_dbe_vol_srm, ecc_dbe_vol_tex, ecc_inforom_ver, ecc_pending, ecc_sbe_agg_cbu, ecc_sbe_agg_dev, ecc_sbe_agg_l1, ecc_sbe_agg_l2, ecc_sbe_agg_reg, ecc_sbe_agg_shm, ecc_sbe_agg_srm, ecc_sbe_agg_tex, ecc_sbe_vol_cbu, ecc_sbe_vol_dev, ecc_sbe_vol_l1, ecc_sbe_vol_l2, ecc_sbe_vol_reg, ecc_sbe_vol_shm, ecc_sbe_vol_srm, ecc_sbe_vol_tex | errors |\n| dcgm.mig.memory.page_retirements | retired_dbe, retired_pending, retired_sbe | pages/s |\n| dcgm.mig.memory.usage | free, reserved, used | B |\n| dcgm.mig.memory.capacity | total | B |\n| dcgm.mig.memory.utilization | used_percent | % |\n| dcgm.mig.power.energy | total | mJ/s |\n| dcgm.mig.power.profiles | enforced_power_profile_mask, requested_power_profile_mask, valid_power_profile_mask | state |\n| dcgm.mig.power.smoothing | pwr_smoothing_active_preset_profile, pwr_smoothing_admin_override_percent_tmp_floor, pwr_smoothing_admin_override_ramp_down_hyst_val, pwr_smoothing_admin_override_ramp_down_rate, pwr_smoothing_admin_override_ramp_up_rate, pwr_smoothing_applied_tmp_ceil, pwr_smoothing_applied_tmp_floor, pwr_smoothing_enabled, pwr_smoothing_hw_circuitry_percent_lifetime_remaining, pwr_smoothing_imm_ramp_down_enabled, pwr_smoothing_max_num_preset_profiles, pwr_smoothing_max_percent_tmp_floor_setting, 
pwr_smoothing_min_percent_tmp_floor_setting, pwr_smoothing_priv_lvl, pwr_smoothing_profile_percent_tmp_floor, pwr_smoothing_profile_ramp_down_hyst_val, pwr_smoothing_profile_ramp_down_rate, pwr_smoothing_profile_ramp_up_rate | value |\n| dcgm.mig.power.usage | draw, enforced_limit, power_mgmt_limit, power_mgmt_limit_def, power_mgmt_limit_max, power_mgmt_limit_min, power_usage_instant | Watts |\n| dcgm.mig.reliability.memory_health | banks_remap_rows_avail_high, banks_remap_rows_avail_low, banks_remap_rows_avail_max, banks_remap_rows_avail_none, banks_remap_rows_avail_partial, memory_unrepairable_flag, threshold_srm | state |\n| dcgm.mig.reliability.recovery_action | get_gpu_recovery_action | state |\n| dcgm.mig.reliability.row_remap_events | correctable_remapped_rows, uncorrectable_remapped_rows | rows/s |\n| dcgm.mig.reliability.row_remap_status | row_remap_failure, row_remap_pending | state |\n| dcgm.mig.reliability.xid | xid | code |\n| dcgm.mig.state.configuration | autoboost, compute_mode, persistence_mode, sync_boost, sync_boost_violation | state |\n| dcgm.mig.state.performance | pstate | state |\n| dcgm.mig.state.virtualization | mig_mode, virtual_mode | state |\n| dcgm.mig.thermal.fan_speed | fan_speed | % |\n| dcgm.mig.thermal.temperature | gpu, gpu_max_op_temp, gpu_temp_limit, mem_max_op_temp, memory, shutdown_temp, slowdown_temp | Celsius |\n| dcgm.mig.throttle.reasons | clocks_event_reasons | bitmask |\n| dcgm.mig.throttle.violations | board_limit_violation, hw_power_brake_slowdown, hw_therm_slowdown, low_utilization_violation, power_violation, reliability_violation, sw_power_cap, sw_therm_slowdown, sync_boost, thermal_violation, total_app_clocks_violation, total_base_clocks_violation | milliseconds/s |\n\n### Per nvlink\n\nThese metrics refer to NVLink link instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| gpu | gpu label from exporter metrics. |\n| gpu_uuid | gpu_uuid label from exporter metrics. |\n| nvlink | nvlink label from exporter metrics. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dcgm.nvlink.interconnect.ber | nvlink_count_effective_ber, nvlink_count_effective_ber_float, nvlink_count_symbol_ber, nvlink_count_symbol_ber_float | ratio |\n| dcgm.nvlink.interconnect.congestion | nvlink_ppcnt_ibpc_port_xmit_wait | events/s |\n| dcgm.nvlink.interconnect.error_rate | gpu_nvlink_errors, nvlink_count_effective_errors, nvlink_count_fec_history_0, nvlink_count_fec_history_1, nvlink_count_fec_history_10, nvlink_count_fec_history_11, nvlink_count_fec_history_12, nvlink_count_fec_history_13, nvlink_count_fec_history_14, nvlink_count_fec_history_15, nvlink_count_fec_history_2, nvlink_count_fec_history_3, nvlink_count_fec_history_4, nvlink_count_fec_history_5, nvlink_count_fec_history_6, nvlink_count_fec_history_7, nvlink_count_fec_history_8, nvlink_count_fec_history_9, nvlink_count_link_recovery_events, nvlink_count_link_recovery_failed_events, nvlink_count_link_recovery_successful_events, nvlink_count_local_link_integrity_errors, nvlink_count_rx_buffer_overrun_errors, nvlink_count_rx_errors, nvlink_count_rx_general_errors, nvlink_count_rx_malformed_packet_errors, nvlink_count_rx_remote_errors, nvlink_count_rx_symbol_errors, nvlink_count_tx_discards, nvlink_crc_data_error, nvlink_crc_data_error_count_l0, nvlink_crc_data_error_count_l1, nvlink_crc_data_error_count_l10, nvlink_crc_data_error_count_l11, nvlink_crc_data_error_count_l12, nvlink_crc_data_error_count_l13, nvlink_crc_data_error_count_l14, nvlink_crc_data_error_count_l15, nvlink_crc_data_error_count_l16, nvlink_crc_data_error_count_l17, nvlink_crc_data_error_count_l2, nvlink_crc_data_error_count_l3, nvlink_crc_data_error_count_l4, nvlink_crc_data_error_count_l5, nvlink_crc_data_error_count_l6, nvlink_crc_data_error_count_l7, nvlink_crc_data_error_count_l8, nvlink_crc_data_error_count_l9, nvlink_crc_flit_error, nvlink_crc_flit_error_count_l0, nvlink_crc_flit_error_count_l1, nvlink_crc_flit_error_count_l10, nvlink_crc_flit_error_count_l11, nvlink_crc_flit_error_count_l12, nvlink_crc_flit_error_count_l13, nvlink_crc_flit_error_count_l14, nvlink_crc_flit_error_count_l15, nvlink_crc_flit_error_count_l16, nvlink_crc_flit_error_count_l17, nvlink_crc_flit_error_count_l2, nvlink_crc_flit_error_count_l3, nvlink_crc_flit_error_count_l4, nvlink_crc_flit_error_count_l5, nvlink_crc_flit_error_count_l6, nvlink_crc_flit_error_count_l7, nvlink_crc_flit_error_count_l8, nvlink_crc_flit_error_count_l9, nvlink_error_dl_crc, nvlink_error_dl_recovery, nvlink_error_dl_replay, nvlink_ppcnt_physical_successful_recovery_events, nvlink_ppcnt_plr_rcv_uncorrectable_code, nvlink_ppcnt_recovery_time_since_last, nvlink_ppcnt_recovery_total_successful_events, nvlink_pprm_oper_recovery, nvlink_recovery_error, nvlink_recovery_error_count_l0, nvlink_recovery_error_count_l1, nvlink_recovery_error_count_l10, nvlink_recovery_error_count_l11, nvlink_recovery_error_count_l12, nvlink_recovery_error_count_l13, nvlink_recovery_error_count_l14, nvlink_recovery_error_count_l15, nvlink_recovery_error_count_l16, nvlink_recovery_error_count_l17, nvlink_recovery_error_count_l2, nvlink_recovery_error_count_l3, nvlink_recovery_error_count_l4, nvlink_recovery_error_count_l5, nvlink_recovery_error_count_l6, nvlink_recovery_error_count_l7, nvlink_recovery_error_count_l8, nvlink_recovery_error_count_l9, nvlink_replay_error, nvlink_replay_error_count_l0, nvlink_replay_error_count_l1, nvlink_replay_error_count_l10, nvlink_replay_error_count_l11, nvlink_replay_error_count_l12, 
nvlink_replay_error_count_l13, nvlink_replay_error_count_l14, nvlink_replay_error_count_l15, nvlink_replay_error_count_l16, nvlink_replay_error_count_l17, nvlink_replay_error_count_l2, nvlink_replay_error_count_l3, nvlink_replay_error_count_l4, nvlink_replay_error_count_l5, nvlink_replay_error_count_l6, nvlink_replay_error_count_l7, nvlink_replay_error_count_l8, nvlink_replay_error_count_l9 | errors/s |\n| dcgm.nvlink.interconnect.errors | nvlink_ppcnt_plr_rcv_uncorrectable_code | errors |\n| dcgm.nvlink.interconnect.state | gpu_topology_nvlink, nvlink_get_state, nvlink_ppcnt_physical_link_down_counter, nvlink_ppcnt_plr_rcv_code_err, nvlink_ppcnt_plr_sync_events, nvlink_ppcnt_plr_xmit_retry_events, p2p_nvlink_status | state |\n| dcgm.nvlink.interconnect.throughput | nvlink_bandwidth, nvlink_bandwidth_l0, nvlink_bandwidth_l1, nvlink_bandwidth_l10, nvlink_bandwidth_l11, nvlink_bandwidth_l12, nvlink_bandwidth_l13, nvlink_bandwidth_l14, nvlink_bandwidth_l15, nvlink_bandwidth_l16, nvlink_bandwidth_l17, nvlink_bandwidth_l2, nvlink_bandwidth_l3, nvlink_bandwidth_l4, nvlink_bandwidth_l5, nvlink_bandwidth_l6, nvlink_bandwidth_l7, nvlink_bandwidth_l8, nvlink_bandwidth_l9, nvlink_count_rx, nvlink_count_tx, nvlink_l0_rx, nvlink_l0_tx, nvlink_l10_rx, nvlink_l10_tx, nvlink_l11_rx, nvlink_l11_tx, nvlink_l12_rx, nvlink_l12_tx, nvlink_l13_rx, nvlink_l13_tx, nvlink_l14_rx, nvlink_l14_tx, nvlink_l15_rx, nvlink_l15_tx, nvlink_l16_rx, nvlink_l16_tx, nvlink_l17_rx, nvlink_l17_tx, nvlink_l1_rx, nvlink_l1_tx, nvlink_l2_rx, nvlink_l2_tx, nvlink_l3_rx, nvlink_l3_tx, nvlink_l4_rx, nvlink_l4_tx, nvlink_l5_rx, nvlink_l5_tx, nvlink_l6_rx, nvlink_l6_tx, nvlink_l7_rx, nvlink_l7_tx, nvlink_l8_rx, nvlink_l8_tx, nvlink_l9_rx, nvlink_l9_tx, nvlink_rx_bandwidth, nvlink_rx_bandwidth_l0, nvlink_rx_bandwidth_l1, nvlink_rx_bandwidth_l10, nvlink_rx_bandwidth_l11, nvlink_rx_bandwidth_l12, nvlink_rx_bandwidth_l13, nvlink_rx_bandwidth_l14, nvlink_rx_bandwidth_l15, nvlink_rx_bandwidth_l16, nvlink_rx_bandwidth_l17, nvlink_rx_bandwidth_l2, nvlink_rx_bandwidth_l3, nvlink_rx_bandwidth_l4, nvlink_rx_bandwidth_l5, nvlink_rx_bandwidth_l6, nvlink_rx_bandwidth_l7, nvlink_rx_bandwidth_l8, nvlink_rx_bandwidth_l9, nvlink_rx, nvlink_tx_bandwidth, nvlink_tx_bandwidth_l0, nvlink_tx_bandwidth_l1, nvlink_tx_bandwidth_l10, nvlink_tx_bandwidth_l11, nvlink_tx_bandwidth_l12, nvlink_tx_bandwidth_l13, nvlink_tx_bandwidth_l14, nvlink_tx_bandwidth_l15, nvlink_tx_bandwidth_l16, nvlink_tx_bandwidth_l17, nvlink_tx_bandwidth_l2, nvlink_tx_bandwidth_l3, nvlink_tx_bandwidth_l4, nvlink_tx_bandwidth_l5, nvlink_tx_bandwidth_l6, nvlink_tx_bandwidth_l7, nvlink_tx_bandwidth_l8, nvlink_tx_bandwidth_l9, nvlink_tx | B/s |\n| dcgm.nvlink.interconnect.traffic | nvlink_count_rx_packets, nvlink_count_tx_packets, nvlink_ppcnt_plr_rcv_codes, nvlink_ppcnt_plr_xmit_codes, nvlink_ppcnt_plr_xmit_retry_codes | events/s |\n| dcgm.nvlink.internal.boundary | nvlink_ppcnt_recovery_time_between_last_two | state |\n| dcgm.nvlink.memory.ecc_error_rate | nvlink_ecc_data_error | errors/s |\n\n### Per nvswitch\n\nThese metrics refer to NVSwitch instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| nvswitch | nvswitch label from exporter metrics. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dcgm.nvswitch.interconnect.nvswitch.current | nvswitch_current_iddq, nvswitch_current_iddq_dvdd, nvswitch_current_iddq_rev | value |\n| dcgm.nvswitch.interconnect.nvswitch.errors | nvswitch_fatal_errors, nvswitch_link_crc_errors, nvswitch_link_crc_errors_lane0, nvswitch_link_crc_errors_lane1, nvswitch_link_crc_errors_lane2, nvswitch_link_crc_errors_lane3, nvswitch_link_crc_errors_lane4, nvswitch_link_crc_errors_lane5, nvswitch_link_crc_errors_lane6, nvswitch_link_crc_errors_lane7, nvswitch_link_fatal_errors, nvswitch_link_flit_errors, nvswitch_link_non_fatal_errors, nvswitch_link_recovery_errors, nvswitch_link_replay_errors, nvswitch_non_fatal_errors | errors/s |\n| dcgm.nvswitch.interconnect.nvswitch.latency | nvswitch_link_latency_count_vc0, nvswitch_link_latency_count_vc1, nvswitch_link_latency_count_vc2, nvswitch_link_latency_count_vc3, nvswitch_link_latency_high_vc0, nvswitch_link_latency_high_vc1, nvswitch_link_latency_high_vc2, nvswitch_link_latency_high_vc3, nvswitch_link_latency_low_vc0, nvswitch_link_latency_low_vc1, nvswitch_link_latency_low_vc2, nvswitch_link_latency_low_vc3, nvswitch_link_latency_medium_vc0, nvswitch_link_latency_medium_vc1, nvswitch_link_latency_medium_vc2, nvswitch_link_latency_medium_vc3, nvswitch_link_latency_panic_vc0, nvswitch_link_latency_panic_vc1, nvswitch_link_latency_panic_vc2, nvswitch_link_latency_panic_vc3 | events/s |\n| dcgm.nvswitch.interconnect.nvswitch.power | nvswitch_power_dvdd, nvswitch_power_hvdd, nvswitch_power_vdd | Watts |\n| dcgm.nvswitch.interconnect.nvswitch.status | nvswitch_link_status, nvswitch_link_type, nvswitch_reset_required | state |\n| dcgm.nvswitch.interconnect.nvswitch.throughput | nvswitch_link_throughput_rx, nvswitch_link_throughput_tx, nvswitch_throughput_rx, nvswitch_throughput_tx | B/s |\n| dcgm.nvswitch.interconnect.nvswitch.topology | nvswitch_device_uuid, nvswitch_link_device_link_id, nvswitch_link_device_link_sid, nvswitch_link_id, nvswitch_link_remote_pcie_bus, nvswitch_link_remote_pcie_device, nvswitch_link_remote_pcie_domain, nvswitch_link_remote_pcie_function, nvswitch_pcie_bus, nvswitch_pcie_device, nvswitch_pcie_domain, nvswitch_pcie_function, nvswitch_phys_id | value |\n| dcgm.nvswitch.interconnect.nvswitch.voltage | nvswitch_voltage_mvolt | mV |\n| dcgm.nvswitch.internal.boundary | first_nvswitch_field_id, last_nvswitch_field_id | state |\n| dcgm.nvswitch.memory.ecc_error_rate | nvswitch_link_ecc_errors, nvswitch_link_ecc_errors_lane0, nvswitch_link_ecc_errors_lane1, nvswitch_link_ecc_errors_lane2, nvswitch_link_ecc_errors_lane3, nvswitch_link_ecc_errors_lane4, nvswitch_link_ecc_errors_lane5, nvswitch_link_ecc_errors_lane6, nvswitch_link_ecc_errors_lane7 | errors/s |\n| dcgm.nvswitch.thermal.temperature | nvswitch_temperature_current, nvswitch_temperature_limit_shutdown, nvswitch_temperature_limit_slowdown | Celsius |\n\n### Per cpu\n\nThese metrics refer to host CPU instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cpu | cpu label from exporter metrics. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dcgm.cpu.clock.frequency | cpu_clock_current | MHz |\n| dcgm.cpu.cpu.info | cpu_model, cpu_vendor | value |\n| dcgm.cpu.cpu.power | cpu_power_limit, cpu_power_util_current | Watts |\n| dcgm.cpu.cpu.temperature | cpu_temp_critical, cpu_temp_current, cpu_temp_warning | Celsius |\n| dcgm.cpu.cpu.utilization | cpu_util, cpu_util_irq, cpu_util_nice, cpu_util_sys, cpu_util_user | % |\n| dcgm.cpu.diagnostics.results | diag_cpu_eud_result | state |\n\n### Per cpu_core\n\nThese metrics refer to host CPU core instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cpu | cpu label from exporter metrics. |\n| cpucore | cpucore label from exporter metrics. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dcgm.cpu_core.clock.frequency | cpu_clock_current | MHz |\n| dcgm.cpu_core.cpu.info | cpu_model, cpu_vendor | value |\n| dcgm.cpu_core.cpu.power | cpu_power_limit, cpu_power_util_current | Watts |\n| dcgm.cpu_core.cpu.temperature | cpu_temp_critical, cpu_temp_current, cpu_temp_warning | Celsius |\n| dcgm.cpu_core.cpu.utilization | cpu_util, cpu_util_irq, cpu_util_nice, cpu_util_sys, cpu_util_user | % |\n| dcgm.cpu_core.diagnostics.results | diag_cpu_eud_result | state |\n\n### Per exporter\n\nThese metrics refer to exporter/global instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| job | job label from exporter metrics. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dcgm.exporter.health.status | bind_unbind_event | state |\n| dcgm.exporter.inventory.software | cuda_driver_version, driver_version, nvml_version | value |\n\n",integration_type:"collector",id:"go.d.plugin-dcgm-Nvidia_Data_Center_GPU_Manager_(DCGM)",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/dcgm/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-dmcache",plugin_name:"go.d.plugin",module_name:"dmcache",monitored_instance:{name:"DMCache devices",link:"",icon_filename:"filesystem.svg",categories:["data-collection.storage"]},keywords:["dmcache"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# DMCache devices\n\nPlugin: go.d.plugin\nModule: dmcache\n\n## Overview\n\nThis collector monitors DMCache, providing insights into capacity usage, efficiency, and activity. It relies on the [`dmsetup`](https://man7.org/linux/man-pages/man8/dmsetup.8.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. 
This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **dmcache** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **dmcache**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/dmcache.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | dmsetup binary execution timeout. | 2 | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **dmcache** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the dmcache data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _dmcache_ (or scroll the list) to locate the **dmcache** collector.\n5. Click the **+** next to the **dmcache** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/dmcache.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dmcache.conf\n```\n\n##### Examples\n\n###### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: dmcache\n    update_every: 5  # Collect DMCache statistics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dmcache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m dmcache\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m dmcache -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dmcache` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dmcache\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dmcache /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dmcache\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per dmcache device\n\nThese metrics refer to the DMCache device.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| device | Device name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dmcache.device_cache_space_usage | free, used | bytes |\n| dmcache.device_metadata_space_usage | free, used | bytes |\n| dmcache.device_cache_read_efficiency | hits, misses | requests/s |\n| dmcache.device_cache_write_efficiency | hits, misses | requests/s |\n| dmcache.device_cache_activity | promotions, demotions | bytes/s |\n| dmcache.device_cache_dirty_size | dirty | bytes |\n\n",integration_type:"collector",id:"go.d.plugin-dmcache-DMCache_devices",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/dmcache/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-dns_query",plugin_name:"go.d.plugin",module_name:"dns_query",monitored_instance:{name:"DNS query",link:"",icon_filename:"network-wired.svg",categories:["data-collection.synthetic-testing"]},keywords:["dns"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# DNS query\n\nPlugin: go.d.plugin\nModule: dns_query\n\n## Overview\n\nThis module monitors DNS query round-trip time (RTT).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **dns_query** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **dns_query**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/dns_query.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="All options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). 
| 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | domains | One or more domains/subdomains to query. A random domain from the list is selected on each iteration. | [] | yes |\n|  | servers | DNS servers to query. If empty, servers from `/etc/resolv.conf` are used automatically. | [] | no |\n|  | timeout | Query timeout (seconds). | 2 | no |\n|  | port | DNS server port. | 53 | no |\n|  | network | DNS query transport protocol. Options: `udp`, `tcp`, `tcp-tls`. | udp | no |\n| **DNS Query** | record_types | DNS record types to query. Options: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. | A | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **dns_query** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the dns_query data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _dns_query_ (or scroll the list) to locate the **dns_query** collector.\n5. Click the **+** next to the **dns_query** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/dns_query.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dns_query.conf\n```\n\n##### Examples\n\n###### Specific DNS servers\n\nAn example configuration using Google\'s public DNS servers.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: job1\n    record_types:\n      - A\n      - AAAA\n    domains:\n      - google.com\n      - github.com\n      - reddit.com\n    servers:\n      - 8.8.8.8\n      - 8.8.4.4\n\n```\n{% /details %}\n###### System DNS\n\nAn example configuration using DNS servers from `/etc/resolv.conf`.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: job1\n    record_types:\n      - A\n      - AAAA\n    domains:\n      - google.com\n      - github.com\n      - reddit.com\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dns_query` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m dns_query\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m dns_query -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dns_query` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dns_query\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dns_query /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dns_query\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ dns_query_query_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dns_query.conf) | dns_query.query_status | DNS request type ${label:record_type} to server ${label:server} is unsuccessful |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per server\n\nThese metrics refer to the DNS server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| server | DNS server address. |\n| network | Network protocol name (tcp, udp, tcp-tls). |\n| record_type | DNS record type (e.g. A, AAAA, CNAME). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dns_query.query_status | success, network_error, dns_error | status |\n| dns_query.query_time | query_time | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-dns_query-DNS_query",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/dnsquery/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-dnsdist",plugin_name:"go.d.plugin",module_name:"dnsdist",monitored_instance:{name:"DNSdist",link:"https://dnsdist.org/",icon_filename:"network-wired.svg",categories:["data-collection.networking"]},keywords:["dnsdist","dns"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# DNSdist\n\nPlugin: go.d.plugin\nModule: dnsdist\n\n## Overview\n\nThis collector monitors DNSdist servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **dnsdist** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **dnsdist**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/dnsdist.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable DNSdist built-in Webserver\n\nFor collecting metrics via HTTP, you need to [enable the built-in webserver](https://dnsdist.org/guides/webserver.html).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8083 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. 
|  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **dnsdist** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the dnsdist data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _dnsdist_ (or scroll the list) to locate the **dnsdist** collector.\n5. Click the **+** next to the **dnsdist** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/dnsdist.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsdist.conf\n```\n\n##### Examples\n\n###### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8083\n    headers:\n      X-API-Key: your-api-key # static pre-shared authentication key for access to the REST API (api-key).\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8083\n    headers:\n      X-API-Key: \'your-api-key\' # static pre-shared authentication key for access to the REST API (api-key).\n\n  - name: remote\n    url: http://203.0.113.0:8083\n    headers:\n      X-API-Key: \'your-api-key\'\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dnsdist` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m dnsdist\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m dnsdist -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dnsdist` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dnsdist\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dnsdist /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dnsdist\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per DNSdist instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsdist.queries | all, recursive, empty | queries/s |\n| dnsdist.queries_dropped | rule_drop, dynamic_blocked, no_policy, non_queries | queries/s |\n| dnsdist.packets_dropped | acl | packets/s |\n| dnsdist.answers | self_answered, nxdomain, refused, trunc_failures | answers/s |\n| dnsdist.backend_responses | responses | responses/s |\n| dnsdist.backend_commerrors | send_errors | errors/s |\n| dnsdist.backend_errors | timeouts, servfail, non_compliant | responses/s |\n| dnsdist.cache | hits, misses | answers/s |\n| dnsdist.servercpu | system_state, user_state | ms/s |\n| dnsdist.servermem | memory_usage | MiB |\n| dnsdist.query_latency | 1ms, 10ms, 50ms, 100ms, 1sec, slow | queries/s |\n| dnsdist.query_latency_avg | 100, 1k, 10k, 1000k | microseconds |\n\n",integration_type:"collector",id:"go.d.plugin-dnsdist-DNSdist",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/dnsdist/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-dnsmasq",plugin_name:"go.d.plugin",module_name:"dnsmasq",monitored_instance:{name:"Dnsmasq",link:"https://thekelleys.org.uk/dnsmasq/doc.html",icon_filename:"dnsmasq.svg",categories:["data-collection.networking"]},keywords:["dnsmasq","dns"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Dnsmasq\n\nPlugin: go.d.plugin\nModule: dnsmasq\n\n## Overview\n\nThis collector monitors Dnsmasq servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **dnsmasq** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **dnsmasq**, then click **+** to add 
a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/dnsmasq.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | address | Dnsmasq server address (`IP:PORT`). | 127.0.0.1:53 | yes |\n|  | protocol | DNS query transport protocol. Options: `udp`, `tcp`, `tcp-tls`. | udp | no |\n|  | timeout | DNS query timeout for dial, write, and read (seconds). | 1 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **dnsmasq** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the dnsmasq data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _dnsmasq_ (or scroll the list) to locate the **dnsmasq** collector.\n5. Click the **+** next to the **dnsmasq** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/dnsmasq.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq.conf\n```\n\n##### Examples\n\n###### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:53\n\n```\n{% /details %}\n###### Using TCP protocol\n\nLocal server with specific DNS query transport protocol.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:53\n    protocol: tcp\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:53\n\n  - name: remote\n    address: 203.0.113.0:53\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dnsmasq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m dnsmasq\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m dnsmasq -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dnsmasq` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dnsmasq\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dnsmasq /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dnsmasq\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dnsmasq instance\n\nThe metrics apply to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq.servers_queries | success, failed | queries/s |\n| dnsmasq.cache_performance | hits, misses | events/s |\n| dnsmasq.cache_operations | insertions, evictions | operations/s |\n| dnsmasq.cache_size | size | entries |\n\n",integration_type:"collector",id:"go.d.plugin-dnsmasq-Dnsmasq",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/dnsmasq/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-dnsmasq_dhcp",plugin_name:"go.d.plugin",module_name:"dnsmasq_dhcp",monitored_instance:{name:"Dnsmasq DHCP",link:"https://www.thekelleys.org.uk/dnsmasq/doc.html",icon_filename:"dnsmasq.svg",categories:["data-collection.networking"]},keywords:["dnsmasq","dhcp"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Dnsmasq DHCP\n\nPlugin: go.d.plugin\nModule: dnsmasq_dhcp\n\n## Overview\n\nThis collector monitors Dnsmasq DHCP lease databases, depending on your configuration.\n\nBy default, it uses:\n\n- `/var/lib/misc/dnsmasq.leases` to read leases.\n- `/etc/dnsmasq.conf` to detect dhcp-ranges.\n- `/etc/dnsmasq.d` to find additional configurations.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAll configured dhcp-ranges are detected automatically.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **dnsmasq_dhcp** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **dnsmasq_dhcp**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/dnsmasq_dhcp.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | leases_path | Path to dnsmasq DHCP leases file. | /var/lib/misc/dnsmasq.leases | no |\n|  | conf_path | Path to dnsmasq configuration file (used to extract configured DHCP pools). | /etc/dnsmasq.conf | no |\n|  | conf_dir | Path to dnsmasq configuration directory (used to extract configured DHCP pools). | /etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **dnsmasq_dhcp** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the dnsmasq_dhcp data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _dnsmasq_dhcp_ (or scroll the list) to locate the **dnsmasq_dhcp** collector.\n5. Click the **+** next to the **dnsmasq_dhcp** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/dnsmasq_dhcp.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq_dhcp.conf\n```\n\n##### Examples\n\n###### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: dnsmasq_dhcp\n    leases_path: /var/lib/misc/dnsmasq.leases\n    conf_path: /etc/dnsmasq.conf\n    conf_dir: /etc/dnsmasq.d\n\n```\n{% /details %}\n###### Pi-hole\n\nDnsmasq DHCP on Pi-hole.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: dnsmasq_dhcp\n    leases_path: /etc/pihole/dhcp.leases\n    conf_path: /etc/dnsmasq.conf\n    conf_dir: /etc/dnsmasq.d\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dnsmasq_dhcp` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m dnsmasq_dhcp\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m dnsmasq_dhcp -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dnsmasq_dhcp` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dnsmasq_dhcp\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dnsmasq_dhcp /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dnsmasq_dhcp\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ dnsmasq_dhcp_dhcp_range_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dnsmasq_dhcp.conf) | dnsmasq_dhcp.dhcp_range_utilization | DHCP range utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dnsmasq DHCP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq_dhcp.dhcp_ranges | ipv4, ipv6 | ranges |\n| dnsmasq_dhcp.dhcp_hosts | ipv4, ipv6 | hosts |\n\n### Per dhcp range\n\nThese metrics refer to the DHCP range.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| dhcp_range | DHCP range in `START_IP:END_IP` format |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq_dhcp.dhcp_range_utilization | used | percentage |\n| dnsmasq_dhcp.dhcp_range_allocated_leases | allocated | leases |\n\n",integration_type:"collector",id:"go.d.plugin-dnsmasq_dhcp-Dnsmasq_DHCP",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/dnsmasq_dhcp/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-docker",plugin_name:"go.d.plugin",module_name:"docker",alternative_monitored_instances:[],monitored_instance:{name:"Docker",link:"https://www.docker.com/",categories:["data-collection.containers-and-vms"],icon_filename:"docker.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["container"]},overview:"# Docker\n\nPlugin: go.d.plugin\nModule: docker\n\n## Overview\n\nThis collector monitors Docker containers state, health status and more.\n\n\nIt connects to the Docker instance via a TCP or UNIX socket and executes the following commands:\n\n- [System info](https://docs.docker.com/engine/api/v1.43/#tag/System/operation/SystemInfo).\n- [List images](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageList).\n- [List containers](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerList).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nRequires netdata user to be in the docker group.\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances running on localhost by attempting to connect to a known Docker UNIX socket: `/var/run/docker.sock`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nEnabling `collect_container_size` may result in high CPU usage depending on the version of Docker Engine.\n\n",setup:"## Setup\n\n\nYou can configure the **docker** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **docker**, then click **+** to add a job. 
|\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/docker.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | address | Docker daemon address. For TCP sockets: `tcp://IP:PORT`. | unix:///var/run/docker.sock | yes |\n|  | timeout | Request timeout (seconds). | 2 | no |\n| **Filters** | container_selector | Container selector. Defines which containers to monitor. Uses [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#readme). | * | no |\n| **Metrics Selection** | collect_container_size | Collect container writable layer size metrics. | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **docker** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the docker data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _docker_ (or scroll the list) to locate the **docker** collector.\n5. Click the **+** next to the **docker** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/docker.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker.conf\n```\n\n##### Examples\n\n###### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n  - name: local\n    address: 'unix:///var/run/docker.sock'\n\n```\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 'unix:///var/run/docker.sock'\n\n  - name: remote\n    address: 'tcp://203.0.113.10:2375'\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `docker` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m docker\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m docker -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `docker` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep docker\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep docker /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
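A quick way to narrow the output to the most recent matches is to pipe it through `tail` (standard tools; the line count is an arbitrary choice):\n\n```bash\n# show only the last 50 matching lines\ngrep docker /var/log/netdata/collector.log | tail -n 50\n```\n\n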
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep docker\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ docker_container_unhealthy ](https://github.com/netdata/netdata/blob/master/src/health/health.d/docker.conf) | docker.container_health_status | ${label:container_name} docker container health status is unhealthy |\n",functions:"## Functions\n\nThis collector exposes real-time functions for interactive troubleshooting in the Live tab.\n\n\n### Containers\n\nRetrieves container list data equivalent to `docker ps -a`.\n\nThis function calls the Docker Container List API with `all=true` and returns both running and non-running containers in a table similar to Docker CLI output.\n\nUse cases:\n- Quickly inspect all containers (running, exited, paused, dead) from Netdata\n- Correlate container lifecycle with metric changes and alerts\n- Verify exposed ports, image tags, and container names without shell access\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Docker:container-ls` |\n| Require Cloud | yes |\n| Performance | Executes a single Docker API request (`ContainerList` with `all=true`):<br/>\u2022 No per-container inspect requests are issued<br/>\u2022 Response size grows with total container count<br/>\u2022 Large histories with many stopped containers may return more rows |\n| Security | Exposes container metadata that may include sensitive details:<br/>\u2022 Container command text may include runtime arguments<br/>\u2022 Image names, ports, and container names are visible<br/>\u2022 Restrict access to authorized operators |\n| Availability | Available when:<br/>\u2022 Docker collector is initialized and connected<br/>\u2022 Docker API list-containers request succeeds<br/>\u2022 Returns HTTP 503 while collector is initializing<br/>\u2022 Returns HTTP 500 on Docker API errors<br/>\u2022 Returns HTTP 504 on timeout |\n\n#### Prerequisites\n\nNo additional configuration is required.\n\n#### Parameters\n\nThis function has no parameters.\n\n#### Returns\n\nContainer inventory from Docker Engine. Each row represents one container returned by `docker ps -a`.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| CONTAINER ID | string |  |  | Short container ID (12 characters). |\n| IMAGE | string |  |  | Container image reference. |\n| COMMAND | string |  |  | Container command as reported by Docker API. |\n| CREATED | string |  |  | Human-readable container creation age (for example, '5 days ago'). |\n| STATUS | string |  |  | Docker status string (for example, 'Up 3 weeks' or 'Exited (0) 4 weeks ago'). |\n| State (Raw) | string |  | hidden | Raw Docker state value (for example, running, exited, paused, restarting, dead). |\n| PORTS | string |  |  | Published or exposed ports summary. |\n| NAMES | string |  |  | Container name. |\n| Container ID (Full) | string |  | hidden | Full 64-character container ID. |\n| Created (Unix) | integer | seconds | hidden | Container creation timestamp in Unix seconds. |\n\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker.containers_state | running, paused, stopped | containers |\n| docker.containers_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | containers |\n| docker.images | active, dangling | images |\n| docker.images_size | size | bytes |\n\n### Per container\n\nMetrics related to containers. Each container provides its own set of the following metrics.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| container_name | The container's name |\n| image | The image name the container uses |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker.container_state | running, paused, exited, created, restarting, removing, dead | state |\n| docker.container_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | status |\n| docker.container_writeable_layer_size | writeable_layer | size |\n\n",integration_type:"collector",id:"go.d.plugin-docker-Docker",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/docker/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-docker_engine",plugin_name:"go.d.plugin",module_name:"docker_engine",alternative_monitored_instances:[],monitored_instance:{name:"Docker Engine",link:"https://docs.docker.com/engine/",categories:["data-collection.containers-and-vms"],icon_filename:"docker.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["docker","container"]},overview:"# Docker Engine\n\nPlugin: go.d.plugin\nModule: docker_engine\n\n## Overview\n\nThis collector monitors the activity and health of Docker Engine and Docker Swarm.\n\n\nThe [built-in](https://docs.docker.com/config/daemon/prometheus/) Prometheus exporter is used to get the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances running on localhost by attempting to connect to a known Docker TCP socket: `http://127.0.0.1:9323/metrics`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **docker_engine** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **docker_engine**, then click **+** to add a 
job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/docker_engine.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable built-in Prometheus exporter, follow the [official documentation](https://docs.docker.com/config/daemon/prometheus/#configure-docker).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:9323/metrics | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **docker_engine** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the docker_engine data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _docker_engine_ (or scroll the list) to locate the **docker_engine** collector.\n5. Click the **+** next to the **docker_engine** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/docker_engine.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker_engine.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9323/metrics\n\n```\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9323/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nConfiguration with HTTPS enabled and a self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9323/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9323/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9323/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `docker_engine` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m docker_engine\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m docker_engine -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `docker_engine` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep docker_engine\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep docker_engine /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
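To watch new entries as they are written while you reproduce the problem, you can follow the file live (a sketch; `--line-buffered` assumes GNU grep):\n\n```bash\n# follow the log and print matching lines as they appear\ntail -f /var/log/netdata/collector.log | grep --line-buffered docker_engine\n```\n\n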
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep docker_engine\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker Engine instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker_engine.engine_daemon_container_actions | changes, commit, create, delete, start | actions/s |\n| docker_engine.engine_daemon_container_states_containers | running, paused, stopped | containers |\n| docker_engine.builder_builds_failed_total | build_canceled, build_target_not_reachable_error, command_not_supported_error, dockerfile_empty_error, dockerfile_syntax_error, error_processing_commands_error, missing_onbuild_arguments_error, unknown_instruction_error | fails/s |\n| docker_engine.engine_daemon_health_checks_failed_total | fails | events/s |\n| docker_engine.swarm_manager_leader | is_leader | bool |\n| docker_engine.swarm_manager_object_store | nodes, services, tasks, networks, secrets, configs | objects |\n| docker_engine.swarm_manager_nodes_per_state | ready, down, unknown, disconnected | nodes |\n| docker_engine.swarm_manager_tasks_per_state | running, failed, ready, rejected, starting, shutdown, new, orphaned, preparing, pending, complete, remove, accepted, assigned | tasks |\n\n",integration_type:"collector",id:"go.d.plugin-docker_engine-Docker_Engine",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/docker_engine/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-dockerhub",plugin_name:"go.d.plugin",module_name:"dockerhub",monitored_instance:{name:"Docker Hub repository",link:"https://hub.docker.com/",icon_filename:"docker.svg",categories:["data-collection.containers-and-vms"]},keywords:["dockerhub"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Docker Hub repository\n\nPlugin: go.d.plugin\nModule: dockerhub\n\n## Overview\n\nThis collector keeps track of DockerHub repositories statistics such as the number of stars, pulls, current status, and more.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\nYou can configure the **dockerhub** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 
|\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **dockerhub**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/dockerhub.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 5 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | repositories | List of Docker Hub repositories to monitor. |  | yes |\n|  | url | DockerHub repositories endpoint URL | https://hub.docker.com/v2/repositories | yes |\n|  | timeout | HTTP request timeout (seconds). | 5 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **dockerhub** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the dockerhub data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _dockerhub_ (or scroll the list) to locate the **dockerhub** collector.\n5. Click the **+** next to the **dockerhub** collector to add a new job.\n6. 
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/dockerhub.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dockerhub.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: dockerhub\n    repositories:\n      - 'user1/name1'\n      - 'user2/name2'\n      - 'user3/name3'\n\n```\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dockerhub` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m dockerhub\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m dockerhub -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dockerhub` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dockerhub\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dockerhub /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
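To surface only the lines most likely to matter, you can additionally filter for warnings and errors (the match pattern is an assumption about the log wording; adjust as needed):\n\n```bash\n# keep only warning/error lines for this collector\ngrep dockerhub /var/log/netdata/collector.log | grep -iE 'warn|error'\n```\n\n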
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dockerhub\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker Hub repository instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dockerhub.pulls_sum | sum | pulls |\n| dockerhub.pulls | a dimension per repository | pulls |\n| dockerhub.pulls_rate | a dimension per repository | pulls/s |\n| dockerhub.stars | a dimension per repository | stars |\n| dockerhub.status | a dimension per repository | status |\n| dockerhub.last_updated | a dimension per repository | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-dockerhub-Docker_Hub_repository",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/dockerhub/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-dovecot",plugin_name:"go.d.plugin",module_name:"dovecot",monitored_instance:{name:"Dovecot",link:"https://www.dovecot.org/",categories:["data-collection.applications"],icon_filename:"dovecot.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["dovecot","imap","mail"]},overview:"# Dovecot\n\nPlugin: go.d.plugin\nModule: dovecot\n\n## Overview\n\nThis collector monitors Dovecot metrics about sessions, logins, commands, page faults and more.\n\n\nIt reads the server's response to the `EXPORT\\tglobal\\n` command.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAutomatically discovers and collects Dovecot statistics from the following default locations:\n\n- localhost:24242\n- unix:///var/run/dovecot/old-stats\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **dovecot** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **dovecot**, then click **+** to add a job. 
|\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/dovecot.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable old_stats plugin\n\nTo enable `old_stats` plugin, see [Old Statistics](https://doc.dovecot.org/configuration_manual/stats/old_statistics/#old-statistics).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | address | Dovecot socket address (Unix or TCP). Used by the [old_stats](https://doc.dovecot.org/configuration_manual/stats/old_statistics/#old-statistics) plugin. | 127.0.0.1:24242 | yes |\n|  | timeout | Connection, read, write, and name resolution timeout (seconds). | 1 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **dovecot** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the dovecot data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _dovecot_ (or scroll the list) to locate the **dovecot** collector.\n5. Click the **+** next to the **dovecot** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/dovecot.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dovecot.conf\n```\n\n##### Examples\n\n###### Basic (TCP)\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:24242\n\n```\n{% /details %}\n###### Basic (UNIX)\n\nA basic example configuration using a UNIX socket.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: unix:///var/run/dovecot/old-stats\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:24242\n\n  - name: remote\n    address: 203.0.113.0:24242\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `dovecot` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m dovecot\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m dovecot -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `dovecot` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep dovecot\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep dovecot /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
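If the log entries are inconclusive, you can also check that the Dovecot old-stats socket is answering at all by sending the same `EXPORT\\tglobal\\n` command the collector uses (a sketch; assumes the default TCP address from this page and a netcat binary):\n\n```bash\n# query the old-stats TCP endpoint directly and show the first lines of the reply\nprintf 'EXPORT\\tglobal\\n' | nc 127.0.0.1 24242 | head\n```\n\n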
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep dovecot\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dovecot instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dovecot.session | active | sessions |\n| dovecot.logins | logins | logins |\n| dovecot.auth | ok, failed | attempts/s |\n| dovecot.commands | commands | commands |\n| dovecot.context_switches | voluntary, involuntary | switches/s |\n| dovecot.io | read, write | KiB/s |\n| dovecot.net | read, write | kilobits/s |\n| dovecot.syscalls | read, write | syscalls/s |\n| dovecot.lookup | path, attr | lookups/s |\n| dovecot.cache | hits | hits/s |\n| dovecot.auth_cache | hits, misses | requests/s |\n\n",integration_type:"collector",id:"go.d.plugin-dovecot-Dovecot",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/dovecot/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-elasticsearch",module_name:"elasticsearch",plugin_name:"go.d.plugin",monitored_instance:{name:"Elasticsearch",link:"https://www.elastic.co/elasticsearch/",icon_filename:"elasticsearch.svg",categories:["data-collection.databases"]},keywords:["elastic","elasticsearch","opensearch","search engine"],related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Containers"}]}},info_provided_to_referring_integrations:{description:""}},overview:'# Elasticsearch\n\nPlugin: go.d.plugin\nModule: elasticsearch\n\n## Overview\n\nThis collector monitors the performance and health of the Elasticsearch cluster.\n\n\nIt uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.\n\nUsed endpoints:\n\n| Endpoint               | Description          | API                                                                                                         |\n|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|\n| `/`                    | Node info            |                                                                                                             |\n| `/_nodes/stats`        | Nodes metrics        | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_nodes/_local/stats` | Local node metrics   | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_cluster/health`     | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html)   |\n| `/_cluster/stats`      | Cluster metrics      | [Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html)     |\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple 
instances of this integration, including remote instances.\n\n\nElasticsearch can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Containers" %}Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect to port 9200:\n\n- http://127.0.0.1:9200\n- https://127.0.0.1:9200\n\n\n#### Limits\n\nBy default, this collector monitors only the node it is connected to. To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **elasticsearch** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **elasticsearch**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/elasticsearch.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 5 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:9200 | yes |\n|  | timeout | HTTP request timeout (seconds). | 2 | no |\n| **Metrics Selection** | cluster_mode | Collect metrics for all nodes in the cluster (yes) or only the local node (no). | no | no |\n|  | collect_node_stats | Collect node metrics. | yes | no |\n|  | collect_cluster_health | Collect cluster health metrics. | yes | no |\n|  | collect_cluster_stats | Collect cluster stats metrics. | yes | no |\n|  | collect_indices_stats | Collect index metrics. | no | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. 
|  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Functions** | functions.top_queries.disabled | Disable the [top-queries](#top-queries) function. | no | no |\n|  | functions.top_queries.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.top_queries.limit | Maximum number of queries to return. | 500 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **elasticsearch** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the elasticsearch data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _elasticsearch_ (or scroll the list) to locate the **elasticsearch** collector.\n5. Click the **+** next to the **elasticsearch** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n\n##### Examples\n\n###### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9200\n\n```\n###### Cluster mode\n\nCluster mode example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9200\n    cluster_mode: yes\n\n```\n{% /details %}\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9200\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nElasticsearch with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9200\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9200\n\n  - name: remote\n    url: http://192.0.2.1:9200\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m elasticsearch\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m elasticsearch -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `elasticsearch` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep elasticsearch\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep elasticsearch /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep elasticsearch\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. |\n| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. |\n| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. |\n| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. |\n| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index ${label:index} health status is red. 
|\n",functions:'## Functions\n\nThis collector exposes real-time functions for interactive troubleshooting in the Live tab.\n\n\n### Top Queries\n\nRetrieves currently running search tasks from the Elasticsearch [Tasks API](https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html).\n\nThis function queries the `/_tasks` endpoint filtered for search actions (`*search`), providing a real-time snapshot of all active search operations across all nodes in the cluster.\n\nUse cases:\n- Identify long-running search queries that may be impacting cluster performance\n- Monitor active search workload distribution across cluster nodes\n- Debug slow or stuck search operations in real-time\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Elasticsearch:top-queries` |\n| Require Cloud | yes |\n| Performance | Queries the `/_tasks` API filtered for search actions:<br/>\u2022 Lightweight operation with minimal cluster overhead<br/>\u2022 Returns only currently active search tasks, typically a small result set |\n| Security | Task descriptions may contain query details including potentially sensitive information:<br/>\u2022 Index names and search patterns<br/>\u2022 Query terms and filter values<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to Elasticsearch/OpenSearch<br/>\u2022 The user has `monitor` or `manage` cluster privileges<br/>\u2022 Returns HTTP 503 if collector is still initializing<br/>\u2022 Returns HTTP 500 if the Tasks API query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Ensure access to Tasks API\n\nThe user must have appropriate privileges to access the Tasks API.\n\n1. For secured clusters, grant the `monitor` or `manage` cluster privilege:\n\n   ```json\n   {\n     "cluster": ["monitor"]\n   }\n   ```\n\n2. Verify access to the Tasks API:\n\n   ```bash\n   curl -u user:password "http://localhost:9200/_tasks?actions=*search"\n   ```\n\n:::info\n\n- The Tasks API returns only currently running tasks; completed tasks are not stored\n- Search tasks can be cancelled using `POST /_tasks/{task_id}/_cancel` if they are cancellable\n- Works with both Elasticsearch and OpenSearch clusters\n\n:::\n\n\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. Options include running time, start time, and task ID. Defaults to running time to show longest-running searches first. | yes | runningTime |  |\n\n#### Returns\n\nReal-time snapshot of currently executing search tasks across all cluster nodes. Each row represents a single active search operation.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Task ID | string |  | hidden | Unique identifier for the task in format `nodeId:taskId`. Can be used with the Task Management API to cancel long-running tasks. |\n| Node ID | string |  |  | Internal identifier of the node executing this search task. |\n| Node Name | string |  |  | Human-readable name of the node executing the search. Useful for identifying workload distribution across the cluster. |\n| Action | string |  |  | The search action being performed (e.g., `indices:data/read/search`). Indicates the type of search operation. 
|\n| Type | string |  | hidden | Task type classification (typically `transport` for search tasks). |\n| Description | string |  |  | Detailed description of the search task including indices being searched and query details. Truncated to 4096 characters. |\n| Start Time | timestamp |  |  | Timestamp when the search task started executing. |\n| Running Time | duration | milliseconds |  | Time elapsed since the search started. High values indicate long-running searches that may need investigation or cancellation. |\n| Cancellable | boolean |  | hidden | Whether the task supports cancellation via the Task Management API. |\n| Cancelled | boolean |  | hidden | Whether a cancellation request has been issued for this task. |\n\n',metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the cluster node.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| node_name | Human-readable identifier for the node. Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). |\n| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_indices_indexing | index | operations/s |\n| elasticsearch.node_indices_indexing_current | index | operations |\n| elasticsearch.node_indices_indexing_time | index | milliseconds |\n| elasticsearch.node_indices_search | queries, fetches | operations/s |\n| elasticsearch.node_indices_search_current | queries, fetches | operations |\n| elasticsearch.node_indices_search_time | queries, fetches | milliseconds |\n| elasticsearch.node_indices_refresh | refresh | operations/s |\n| elasticsearch.node_indices_refresh_time | refresh | milliseconds |\n| elasticsearch.node_indices_flush | flush | operations/s |\n| elasticsearch.node_indices_flush_time | flush | milliseconds |\n| elasticsearch.node_indices_fielddata_memory_usage | used | bytes |\n| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s |\n| elasticsearch.node_indices_segments_count | segments | segments |\n| elasticsearch.node_indices_segments_memory_usage_total | used | bytes |\n| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes |\n| elasticsearch.node_indices_translog_operations | total, uncommitted | operations |\n| elasticsearch.node_indices_translog_size | total, uncommitted | bytes |\n| elasticsearch.node_file_descriptors | open | fd |\n| elasticsearch.node_jvm_heap | inuse | percentage |\n| elasticsearch.node_jvm_heap_bytes | committed, used | bytes |\n| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools |\n| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes |\n| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes |\n| elasticsearch.node_jvm_gc_count | young, old | gc/s |\n| elasticsearch.node_jvm_gc_time | young, old | milliseconds |\n| elasticsearch.node_thread_pool_queued | 
generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_cluster_communication_packets | received, sent | pps |\n| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s |\n| elasticsearch.node_http_connections | open | connections |\n| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s |\n\n### Per cluster\n\nThese metrics refer to the cluster.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.cluster_health_status | green, yellow, red | status |\n| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes |\n| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unassigned | shards |\n| elasticsearch.cluster_pending_tasks | pending | tasks |\n| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches |\n| elasticsearch.cluster_indices_count | indices | indices |\n| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards |\n| elasticsearch.cluster_indices_docs_count | docs | docs |\n| elasticsearch.cluster_indices_store_size | size | bytes |\n| elasticsearch.cluster_indices_query_cache | hit, miss | events/s |\n| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes |\n\n### Per index\n\nThese metrics refer to the index.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| index | Name of the index. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_index_health | green, yellow, red | status |\n| elasticsearch.node_index_shards_count | shards | shards |\n| elasticsearch.node_index_docs_count | docs | docs |\n| elasticsearch.node_index_store_size | store_size | bytes |\n\n",integration_type:"collector",id:"go.d.plugin-elasticsearch-Elasticsearch",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/elasticsearch/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-opensearch",module_name:"elasticsearch",plugin_name:"go.d.plugin",monitored_instance:{name:"OpenSearch",link:"https://opensearch.org/",icon_filename:"opensearch.svg",categories:["data-collection.databases"]},keywords:["elastic","elasticsearch","opensearch","search engine"],related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Containers"}]}},info_provided_to_referring_integrations:{description:""}},overview:'# OpenSearch\n\nPlugin: go.d.plugin\nModule: elasticsearch\n\n## Overview\n\nThis collector monitors the performance and health of the OpenSearch cluster, using its Elasticsearch-compatible APIs.\n\n\nIt uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.\n\nUsed endpoints:\n\n| Endpoint               | Description          | API                                                                                                         |\n|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|\n| `/`                    | Node info            |                                                                                                             |\n| `/_nodes/stats`        | Nodes metrics        | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_nodes/_local/stats` | Local node metrics   | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_cluster/health`     | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html)   |\n| `/_cluster/stats`      | Cluster metrics      | [Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html)     |\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nOpenSearch can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Containers" %}Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect to port 9200:\n\n- http://127.0.0.1:9200\n- https://127.0.0.1:9200\n\n\n#### Limits\n\nBy default, this collector monitors only the node it is connected to. 
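\n\nYou can check how many nodes the cluster actually reports, and therefore what single-node mode leaves uncovered, by querying the same health endpoint the collector scrapes. A minimal sketch, assuming the default local endpoint from the auto-detection list above:\n\n```bash\n# Assumes the cluster listens on the default local endpoint (adjust as needed).\n# A "number_of_nodes" greater than 1 means single-node mode misses part of the cluster.\ncurl -s "http://127.0.0.1:9200/_cluster/health?pretty"\n```\n\n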
To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **elasticsearch** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **elasticsearch**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/elasticsearch.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 5 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:9200 | yes |\n|  | timeout | HTTP request timeout (seconds). | 2 | no |\n| **Metrics Selection** | cluster_mode | Collect metrics for all nodes in the cluster (yes) or only the local node (no). | no | no |\n|  | collect_node_stats | Collect node metrics. | yes | no |\n|  | collect_cluster_health | Collect cluster health metrics. | yes | no |\n|  | collect_cluster_stats | Collect cluster stats metrics. | yes | no |\n|  | collect_indices_stats | Collect index metrics. | no | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. 
| no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Functions** | functions.top_queries.disabled | Disable the [top-queries](#top-queries) function. | no | no |\n|  | functions.top_queries.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.top_queries.limit | Maximum number of queries to return. | 500 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **elasticsearch** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the elasticsearch data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _elasticsearch_ (or scroll the list) to locate the **elasticsearch** collector.\n5. Click the **+** next to the **elasticsearch** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n\n##### Examples\n\n###### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9200\n\n```\n###### Cluster mode\n\nCluster mode example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9200\n    cluster_mode: yes\n\n```\n{% /details %}\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9200\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nElasticsearch with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9200\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9200\n\n  - name: remote\n    url: http://192.0.2.1:9200\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not 
supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m elasticsearch\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m elasticsearch -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `elasticsearch` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep elasticsearch\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep elasticsearch /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep elasticsearch\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. |\n| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. |\n| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. |\n| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. |\n| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index $label:index health status is red. 
|\n",functions:'## Functions\n\nThis collector exposes real-time functions for interactive troubleshooting in the Live tab.\n\n\n### Top Queries\n\nRetrieves currently running search tasks from the Elasticsearch [Tasks API](https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html).\n\nThis function queries the `/_tasks` endpoint filtered for search actions (`*search`), providing a real-time snapshot of all active search operations across all nodes in the cluster.\n\nUse cases:\n- Identify long-running search queries that may be impacting cluster performance\n- Monitor active search workload distribution across cluster nodes\n- Debug slow or stuck search operations in real-time\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Elasticsearch:top-queries` |\n| Require Cloud | yes |\n| Performance | Queries the `/_tasks` API filtered for search actions:<br/>\u2022 Lightweight operation with minimal cluster overhead<br/>\u2022 Returns only currently active search tasks, typically a small result set |\n| Security | Task descriptions may contain query details including potentially sensitive information:<br/>\u2022 Index names and search patterns<br/>\u2022 Query terms and filter values<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to Elasticsearch/OpenSearch<br/>\u2022 The user has `monitor` or `manage` cluster privileges<br/>\u2022 Returns HTTP 503 if collector is still initializing<br/>\u2022 Returns HTTP 500 if the Tasks API query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Ensure access to Tasks API\n\nThe user must have appropriate privileges to access the Tasks API.\n\n1. For secured clusters, grant the `monitor` or `manage` cluster privilege:\n\n   ```json\n   {\n     "cluster": ["monitor"]\n   }\n   ```\n\n2. Verify access to the Tasks API:\n\n   ```bash\n   curl -u user:password "http://localhost:9200/_tasks?actions=*search"\n   ```\n\n:::info\n\n- The Tasks API returns only currently running tasks; completed tasks are not stored\n- Search tasks can be cancelled using `POST /_tasks/{task_id}/_cancel` if they are cancellable\n- Works with both Elasticsearch and OpenSearch clusters\n\n:::\n\n\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. Options include running time, start time, and task ID. Defaults to running time to show longest-running searches first. | yes | runningTime |  |\n\n#### Returns\n\nReal-time snapshot of currently executing search tasks across all cluster nodes. Each row represents a single active search operation.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Task ID | string |  | hidden | Unique identifier for the task in format `nodeId:taskId`. Can be used with the Task Management API to cancel long-running tasks. |\n| Node ID | string |  |  | Internal identifier of the node executing this search task. |\n| Node Name | string |  |  | Human-readable name of the node executing the search. Useful for identifying workload distribution across the cluster. |\n| Action | string |  |  | The search action being performed (e.g., `indices:data/read/search`). Indicates the type of search operation. 
|\n| Type | string |  | hidden | Task type classification (typically `transport` for search tasks). |\n| Description | string |  |  | Detailed description of the search task including indices being searched and query details. Truncated to 4096 characters. |\n| Start Time | timestamp |  |  | Timestamp when the search task started executing. |\n| Running Time | duration | milliseconds |  | Time elapsed since the search started. High values indicate long-running searches that may need investigation or cancellation. |\n| Cancellable | boolean |  | hidden | Whether the task supports cancellation via the Task Management API. |\n| Cancelled | boolean |  | hidden | Whether a cancellation request has been issued for this task. |\n\n',metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the cluster node.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| node_name | Human-readable identifier for the node. Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). |\n| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_indices_indexing | index | operations/s |\n| elasticsearch.node_indices_indexing_current | index | operations |\n| elasticsearch.node_indices_indexing_time | index | milliseconds |\n| elasticsearch.node_indices_search | queries, fetches | operations/s |\n| elasticsearch.node_indices_search_current | queries, fetches | operations |\n| elasticsearch.node_indices_search_time | queries, fetches | milliseconds |\n| elasticsearch.node_indices_refresh | refresh | operations/s |\n| elasticsearch.node_indices_refresh_time | refresh | milliseconds |\n| elasticsearch.node_indices_flush | flush | operations/s |\n| elasticsearch.node_indices_flush_time | flush | milliseconds |\n| elasticsearch.node_indices_fielddata_memory_usage | used | bytes |\n| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s |\n| elasticsearch.node_indices_segments_count | segments | segments |\n| elasticsearch.node_indices_segments_memory_usage_total | used | bytes |\n| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes |\n| elasticsearch.node_indices_translog_operations | total, uncommitted | operations |\n| elasticsearch.node_indices_translog_size | total, uncommitted | bytes |\n| elasticsearch.node_file_descriptors | open | fd |\n| elasticsearch.node_jvm_heap | inuse | percentage |\n| elasticsearch.node_jvm_heap_bytes | committed, used | bytes |\n| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools |\n| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes |\n| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes |\n| elasticsearch.node_jvm_gc_count | young, old | gc/s |\n| elasticsearch.node_jvm_gc_time | young, old | milliseconds |\n| elasticsearch.node_thread_pool_queued | 
generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_cluster_communication_packets | received, sent | pps |\n| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s |\n| elasticsearch.node_http_connections | open | connections |\n| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s |\n\n### Per cluster\n\nThese metrics refer to the cluster.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.cluster_health_status | green, yellow, red | status |\n| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes |\n| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unassigned | shards |\n| elasticsearch.cluster_pending_tasks | pending | tasks |\n| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches |\n| elasticsearch.cluster_indices_count | indices | indices |\n| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards |\n| elasticsearch.cluster_indices_docs_count | docs | docs |\n| elasticsearch.cluster_indices_store_size | size | bytes |\n| elasticsearch.cluster_indices_query_cache | hit, miss | events/s |\n| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes |\n\n### Per index\n\nThese metrics refer to the index.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| index | Name of the index. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_index_health | green, yellow, red | status |\n| elasticsearch.node_index_shards_count | shards | shards |\n| elasticsearch.node_index_docs_count | docs | docs |\n| elasticsearch.node_index_store_size | store_size | bytes |\n\n",integration_type:"collector",id:"go.d.plugin-elasticsearch-OpenSearch",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/elasticsearch/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-envoy",plugin_name:"go.d.plugin",module_name:"envoy",monitored_instance:{name:"Envoy",link:"https://www.envoyproxy.io/",icon_filename:"envoy.svg",categories:["data-collection.web-servers-and-proxies"]},keywords:["envoy","proxy"],related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"}]}},info_provided_to_referring_integrations:{description:""}},overview:'# Envoy\n\nPlugin: go.d.plugin\nModule: envoy\n\n## Overview\n\nThis collector monitors Envoy proxies. 
It collects server, cluster, and listener metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nEnvoy can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Envoy instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **envoy** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **envoy**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/envoy.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:9091/stats/prometheus | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). 
|  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **envoy** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the envoy data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _envoy_ (or scroll the list) to locate the **envoy** collector.\n5. Click the **+** next to the **envoy** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/envoy.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/envoy.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9901/stats/prometheus\n\n```\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9901/stats/prometheus\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9901/stats/prometheus\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9901/stats/prometheus\n\n  - name: remote\n    url: http://192.0.2.1:9901/stats/prometheus\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `envoy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m envoy\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m envoy -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `envoy` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep envoy\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep envoy /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep envoy\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Envoy instance\n\nEnvoy exposes metrics in Prometheus format. 
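\n\nIf charts stay empty, it can help to first confirm that this endpoint is reachable and actually serving Prometheus text. A minimal check, assuming the admin address used in the configuration examples on this page:\n\n```bash\n# Assumes the Envoy admin interface address from the examples above (adjust host/port as needed).\ncurl -s http://127.0.0.1:9901/stats/prometheus | head\n```\n\n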
All metric labels are added to charts.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| envoy.server_state | live, draining, pre_initializing, initializing | state |\n| envoy.server_connections_count | connections | connections |\n| envoy.server_parent_connections_count | connections | connections |\n| envoy.server_memory_allocated_size | allocated | bytes |\n| envoy.server_memory_heap_size | heap | bytes |\n| envoy.server_memory_physical_size | physical | bytes |\n| envoy.server_uptime | uptime | seconds |\n| envoy.cluster_manager_cluster_count | active, not_active | clusters |\n| envoy.cluster_manager_cluster_changes_rate | added, modified, removed | clusters/s |\n| envoy.cluster_manager_cluster_updates_rate | cluster | updates/s |\n| envoy.cluster_manager_cluster_updated_via_merge_rate | via_merge | updates/s |\n| envoy.cluster_manager_update_merge_cancelled_rate | merge_cancelled | updates/s |\n| envoy.cluster_manager_update_out_of_merge_window_rate | out_of_merge_window | updates/s |\n| envoy.cluster_membership_endpoints_count | healthy, degraded, excluded | endpoints |\n| envoy.cluster_membership_changes_rate | membership | changes/s |\n| envoy.cluster_membership_updates_rate | success, failure, empty, no_rebuild | updates/s |\n| envoy.cluster_upstream_cx_active_count | active | connections |\n| envoy.cluster_upstream_cx_rate | created | connections/s |\n| envoy.cluster_upstream_cx_http_rate | http1, http2, http3 | connections/s |\n| envoy.cluster_upstream_cx_destroy_rate | local, remote | connections/s |\n| envoy.cluster_upstream_cx_connect_fail_rate | failed | connections/s |\n| envoy.cluster_upstream_cx_connect_timeout_rate | timeout | connections/s |\n| envoy.cluster_upstream_cx_bytes_rate | received, sent | bytes/s |\n| envoy.cluster_upstream_cx_bytes_buffered_size | received, send | bytes |\n| envoy.cluster_upstream_rq_active_count | active | requests |\n| envoy.cluster_upstream_rq_rate | requests | requests/s |\n| envoy.cluster_upstream_rq_failed_rate | cancelled, maintenance_mode, timeout, max_duration_reached, per_try_timeout, reset_local, reset_remote | requests/s |\n| envoy.cluster_upstream_rq_pending_active_count | active_pending | requests |\n| envoy.cluster_upstream_rq_pending_rate | pending | requests/s |\n| envoy.cluster_upstream_rq_pending_failed_rate | overflow, failure_eject | requests/s |\n| envoy.cluster_upstream_rq_retry_rate | request | retries/s |\n| envoy.cluster_upstream_rq_retry_success_rate | success | retries/s |\n| envoy.cluster_upstream_rq_retry_backoff_rate | exponential, ratelimited | retries/s |\n| envoy.listener_manager_listeners_count | active, warming, draining | listeners |\n| envoy.listener_manager_listener_changes_rate | added, modified, removed, stopped | listeners/s |\n| envoy.listener_manager_listener_object_events_rate | create_success, create_failure, in_place_updated | objects/s |\n| envoy.listener_admin_downstream_cx_active_count | active | connections |\n| envoy.listener_admin_downstream_cx_rate | created | connections/s |\n| envoy.listener_admin_downstream_cx_destroy_rate | destroyed | connections/s |\n| envoy.listener_admin_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s |\n| envoy.listener_admin_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s |\n| envoy.listener_admin_downstream_listener_filter_remote_close_rate | closed | connections/s |\n| envoy.listener_admin_downstream_listener_filter_error_rate | read | 
errors/s |\n| envoy.listener_admin_downstream_pre_cx_active_count | active | sockets |\n| envoy.listener_admin_downstream_pre_cx_timeout_rate | timeout | sockets/s |\n| envoy.listener_downstream_cx_active_count | active | connections |\n| envoy.listener_downstream_cx_rate | created | connections/s |\n| envoy.listener_downstream_cx_destroy_rate | destroyed | connections/s |\n| envoy.listener_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s |\n| envoy.listener_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s |\n| envoy.listener_downstream_listener_filter_remote_close_rate | closed | connections/s |\n| envoy.listener_downstream_listener_filter_error_rate | read | errors/s |\n| envoy.listener_downstream_pre_cx_active_count | active | sockets |\n| envoy.listener_downstream_pre_cx_timeout_rate | timeout | sockets/s |\n\n",integration_type:"collector",id:"go.d.plugin-envoy-Envoy",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/envoy/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-ethtool",plugin_name:"go.d.plugin",module_name:"ethtool",monitored_instance:{name:"Optical modules",link:"",icon_filename:"network-wired.svg",categories:["data-collection.networking"]},keywords:["sfp","ddm","optic"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Optical modules\n\nPlugin: go.d.plugin\nModule: ethtool\n\n## Overview\n\nThis collector monitors optical transceiver modules' diagnostic parameters  (temperature, voltage, laser bias current, transmit/receive power levels) from network interfaces  equipped with modules that support Digital Diagnostic Monitoring (DDM). It relies on the [`ethtool`](https://man7.org/linux/man-pages/man8/ethtool.8.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **ethtool** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **ethtool**, then click **+** to add a job. 
|\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/ethtool.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |\n| optical_interfaces | Space-separated list of optical interface names which must have optical transceiver modules with [DDM](https://en.wikipedia.org/wiki/Small_Form-factor_Pluggable#Digital_diagnostics_monitoring). |  | yes |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **ethtool** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the ethtool data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _ethtool_ (or scroll the list) to locate the **ethtool** collector.\n5. Click the **+** next to the **ethtool** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/ethtool.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ethtool.conf\n```\n\n##### Examples\n\n###### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: ethtool\n    binary_path: /usr/local/sbin/ethtool\n    optical_interfaces: "enp1s0 enp1s1 enp2s0"\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `ethtool` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m ethtool\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m ethtool -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `ethtool` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep ethtool\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep ethtool /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep ethtool\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Optical Transceiver Module\n\nMetrics collected from optical transceiver modules that support Digital Diagnostic Monitoring (DDM).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| iface | Network interface name where the optical transceiver module is installed. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ethtool.optical_module_receiver_signal_power | rx_power | dBm |\n| ethtool.optical_module_laser_output_power | tx_power | dBm |\n| ethtool.optical_module_laser_bias_current | bias_current | mA |\n| ethtool.optical_module_temperature | temperature | Celsius |\n| ethtool.optical_module_voltage | voltage | Volts |\n\n",integration_type:"collector",id:"go.d.plugin-ethtool-Optical_modules",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/ethtool/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-exim",plugin_name:"go.d.plugin",module_name:"exim",monitored_instance:{name:"Exim",link:"https://www.exim.org/",icon_filename:"exim.jpg",categories:["data-collection.applications"]},keywords:["exim","mail","email"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Exim\n\nPlugin: go.d.plugin\nModule: exim\n\n## Overview\n\nThis collector monitors Exim mail queue. It relies on the [`exim`](https://www.exim.org/exim-html-3.20/doc/html/spec_5.html) CLI tool but avoids directly executing the binary. 
Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\nExecuted commands:\n- `exim -bpc`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **exim** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **exim**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/exim.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | exim binary execution timeout. | 2 | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **exim** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the exim data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _exim_ (or scroll the list) to locate the **exim** collector.\n5. Click the **+** next to the **exim** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/exim.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/exim.conf\n```\n\n##### Examples\n\n###### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: exim\n    update_every: 5  # Collect Exim mail queue statistics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `exim` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m exim\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m exim -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `exim` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep exim\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep exim /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep exim\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Exim instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exim.qemails | emails | emails |\n\n",integration_type:"collector",id:"go.d.plugin-exim-Exim",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/exim/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-fail2ban",plugin_name:"go.d.plugin",module_name:"fail2ban",monitored_instance:{name:"Fail2ban",link:"https://github.com/fail2ban/fail2ban#readme",icon_filename:"fail2ban.png",categories:["data-collection.applications"]},keywords:["fail2ban","security","authentication","authorization"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Fail2ban\n\nPlugin: go.d.plugin\nModule: fail2ban\n\n## Overview\n\nThis collector tracks two main metrics for each jail: currently banned IPs and active failure incidents. It relies on the [`fail2ban-client`](https://linux.die.net/man/1/fail2ban-client) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **fail2ban** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **fail2ban**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/fail2ban.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### For Netdata running in a Docker container\n\n1. **Install Fail2ban client**.\n\n    Ensure `fail2ban-client` is available in the container by setting the environment variable `NETDATA_EXTRA_DEB_PACKAGES=fail2ban` when starting the container.\n\n2. 
**Mount host\'s `/var/run` directory**.\n\n    Mount the host machine\'s `/var/run` directory to `/host/var/run` inside your Netdata container. This grants Netdata access to the Fail2ban socket file, typically located at `/var/run/fail2ban/fail2ban.sock`.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | fail2ban-client binary execution timeout. | 2 | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **fail2ban** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the fail2ban data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _fail2ban_ (or scroll the list) to locate the **fail2ban** collector.\n5. Click the **+** next to the **fail2ban** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/fail2ban.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/fail2ban.conf\n```\n\n##### Examples\n\n###### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: fail2ban\n    update_every: 5  # Collect Fail2Ban jails statistics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `fail2ban` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m fail2ban\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m fail2ban -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `fail2ban` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep fail2ban\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep fail2ban /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep fail2ban\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per jail\n\nThese metrics refer to the Jail.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| jail | Jail's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| fail2ban.jail_banned_ips | banned | addresses |\n| fail2ban.jail_active_failures | active_failures | failures |\n\n",integration_type:"collector",id:"go.d.plugin-fail2ban-Fail2ban",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/fail2ban/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-filecheck",plugin_name:"go.d.plugin",module_name:"filecheck",monitored_instance:{name:"Files and directories",link:"",icon_filename:"filesystem.svg",categories:["data-collection.synthetic-testing"]},keywords:["files","directories"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Files and directories\n\nPlugin: go.d.plugin\nModule: filecheck\n\n## Overview\n\nThis collector monitors the existence, last modification time, and size of arbitrary files and directories on the system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis collector requires the DAC_READ_SEARCH capability when monitoring files not normally accessible to the Netdata user, but it is set automatically during installation, so no manual configuration is needed.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\nYou can configure the **filecheck** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **filecheck**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/filecheck.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). 
| 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | [files](#option-target-files) | File selector. Defines which files to monitor. |  | yes |\n|  | [dirs](#option-target-dirs) | Directory selector. Defines which directories to monitor. |  | yes |\n| **Discovery** | discovery_every | Files and directories discovery interval (seconds). | 60 | no |\n\n<a id=\"option-target-files\"></a>\n##### files\n\nFiles matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\nfiles:\n  include:\n    - pattern1\n    - pattern2\n  exclude:\n    - pattern3\n    - pattern4\n```\n\n\n<a id=\"option-target-dirs\"></a>\n##### dirs\n\nDirectories matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ndirs:\n  include:\n    - pattern1\n    - pattern2\n  exclude:\n    - pattern3\n    - pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **filecheck** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the filecheck data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _filecheck_ (or scroll the list) to locate the **filecheck** collector.\n5. Click the **+** next to the **filecheck** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/filecheck.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/filecheck.conf\n```\n\n##### Examples\n\n###### Files\n\nFiles monitoring example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: files_example\n    files:\n      include:\n        - '/path/to/file1'\n        - '/path/to/file2'\n        - '/path/to/*.log'\n\n```\n{% /details %}\n###### Directories\n\nDirectories monitoring example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: files_example\n    dirs:\n      collect_dir_size: no\n      include:\n        - '/path/to/dir1'\n        - '/path/to/dir2'\n        - '/path/to/dir3*'\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `filecheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m filecheck\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m filecheck -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `filecheck` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep filecheck\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep filecheck /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep filecheck\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per file\n\nThese metrics refer to the File.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| file_path | File absolute path |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filecheck.file_existence_status | exist, not_exist | status |\n| filecheck.file_modification_time_ago | mtime_ago | seconds |\n| filecheck.file_size_bytes | size | bytes |\n\n### Per directory\n\nThese metrics refer to the Directory.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| dir_path | Directory absolute path |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filecheck.dir_existence_status | exist, not_exist | status |\n| filecheck.dir_modification_time_ago | mtime_ago | seconds |\n| filecheck.dir_size_bytes | size | bytes |\n| filecheck.dir_files_count | files | files |\n\n",integration_type:"collector",id:"go.d.plugin-filecheck-Files_and_directories",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/filecheck/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-fluentd",plugin_name:"go.d.plugin",module_name:"fluentd",monitored_instance:{name:"Fluentd",link:"https://www.fluentd.org/",icon_filename:"fluentd.svg",categories:["data-collection.applications"]},keywords:["fluentd","logging"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Fluentd\n\nPlugin: go.d.plugin\nModule: fluentd\n\n## Overview\n\nThis collector monitors Fluentd servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **fluentd** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **fluentd**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/fluentd.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable monitor agent\n\nTo enable monitor agent, follow the [official documentation](https://docs.fluentd.org/v1.0/articles/monitoring-rest-api).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:24220 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **fluentd** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the fluentd data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _fluentd_ (or scroll the list) to locate the **fluentd** collector.\n5. Click the **+** next to the **fluentd** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/fluentd.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/fluentd.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:24220\n\n```\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:24220\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nFluentd with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:24220\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:24220\n\n  - name: remote\n    url: http://192.0.2.1:24220\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `fluentd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m fluentd\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m fluentd -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `fluentd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep fluentd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep fluentd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
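If the file is long, you can narrow the output to recent entries first; for example, with a generic shell one-liner (not a Netdata-specific tool):\n\n```bash\n# inspect only the last 1000 lines, then filter for the collector\ntail -n 1000 /var/log/netdata/collector.log | grep fluentd\n```\n\n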
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep fluentd\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Fluentd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| fluentd.retry_count | a dimension per plugin | count |\n| fluentd.buffer_queue_length | a dimension per plugin | queue_length |\n| fluentd.buffer_total_queued_size | a dimension per plugin | queued_size |\n\n",integration_type:"collector",id:"go.d.plugin-fluentd-Fluentd",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/fluentd/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-freeradius",plugin_name:"go.d.plugin",module_name:"freeradius",monitored_instance:{name:"FreeRADIUS",link:"https://freeradius.org/",categories:["data-collection.applications"],icon_filename:"freeradius.svg"},keywords:["freeradius","radius"],info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[]}}},overview:"# FreeRADIUS\n\nPlugin: go.d.plugin\nModule: freeradius\n\n## Overview\n\nThis collector monitors FreeRADIUS servers.\n\nIt collects metrics by sending [status-server](https://wiki.freeradius.org/config/Status) messages to the server.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects FreeRADIUS instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **freeradius** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **freeradius**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/freeradius.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable status server\n\nTo enable status server, follow the [official documentation](https://wiki.freeradius.org/config/Status).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | address | FreeRADIUS server address. | 127.0.0.1 | yes |\n|  | timeout | Connection, read, and write timeout (seconds). | 1 | no |\n|  | port | FreeRADIUS server port. | 18121 | no |\n| **Auth** | secret | FreeRADIUS shared secret. | adminsecret | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **freeradius** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the freeradius data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _freeradius_ (or scroll the list) to locate the **freeradius** collector.\n5. Click the **+** next to the **freeradius** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/freeradius.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/freeradius.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1\n    port: 18121\n    secret: adminsecret\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1\n    port: 18121\n    secret: adminsecret\n\n  - name: remote\n    address: 192.0.2.1\n    port: 18121\n    secret: adminsecret\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `freeradius` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m freeradius\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m freeradius -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `freeradius` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep freeradius\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep freeradius /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
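To keep only the newest matches, one option is to pipe the output through `tail`:\n\n```bash\n# show only the last 100 matching lines\ngrep freeradius /var/log/netdata/collector.log | tail -n 100\n```\n\n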
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep freeradius\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per FreeRADIUS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| freeradius.authentication | requests, responses | packets/s |\n| freeradius.authentication_access_responses | accepts, rejects, challenges | packets/s |\n| freeradius.bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.proxy_authentication | requests, responses | packets/s |\n| freeradius.proxy_authentication_access_responses | accepts, rejects, challenges | packets/s |\n| freeradius.proxy_bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.accounting | requests, responses | packets/s |\n| freeradius.bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.proxy_accounting | requests, responses | packets/s |\n| freeradius.proxy_bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n\n",integration_type:"collector",id:"go.d.plugin-freeradius-FreeRADIUS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/freeradius/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-gearman",plugin_name:"go.d.plugin",module_name:"gearman",monitored_instance:{name:"Gearman",link:"https://gearman.org/",categories:["data-collection.applications"],icon_filename:"gearman.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["gearman"]},overview:"# Gearman\n\nPlugin: go.d.plugin\nModule: gearman\n\n## Overview\n\nMonitors job activity, priority, and available workers. 
It collects summary and function-specific statistics.\n\n\nThis collector connects to a Gearman instance via TCP socket and executes the following commands:\n\n- status\n- priority-status\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Gearman instances running on localhost that are listening on port 4730.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **gearman** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **gearman**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/gearman.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | address | Gearman server address (`IP:PORT`). | 127.0.0.1:4730 | yes |\n|  | timeout | Connection, read, write, and name resolution timeout (seconds). | 1 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **gearman** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the gearman data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _gearman_ (or scroll the list) to locate the **gearman** collector.\n5. Click the **+** next to the **gearman** collector to add a new job.\n6. 
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/gearman.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/gearman.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:4730\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:4730\n\n  - name: remote\n    address: 203.0.113.0:4730\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `gearman` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m gearman\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m gearman -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `gearman` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep gearman\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep gearman /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
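To watch new entries as they are written (for example, while restarting Netdata to reproduce a problem), you can follow the file instead:\n\n```bash\n# follow the log live and keep only lines mentioning the collector\ntail -f /var/log/netdata/collector.log | grep gearman\n```\n\n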
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep gearman\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Gearman instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| gearman.queued_jobs_activity | running, waiting | jobs |\n| gearman.queued_jobs_priority | high, normal, low | jobs |\n\n### Per function\n\nThese metrics refer to the Function (task).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| function_name | Function name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| gearman.function_queued_jobs_activity | running, waiting | jobs |\n| gearman.function_queued_jobs_priority | high, normal, low | jobs |\n| gearman.function_workers | available | workers |\n\n",integration_type:"collector",id:"go.d.plugin-gearman-Gearman",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/gearman/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-geth",plugin_name:"go.d.plugin",module_name:"geth",monitored_instance:{name:"Go-ethereum",link:"https://github.com/ethereum/go-ethereum",icon_filename:"geth.png",categories:["data-collection.applications"]},keywords:["geth","ethereum","blockchain"],related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"}]}},info_provided_to_referring_integrations:{description:""}},overview:'# Go-ethereum\n\nPlugin: go.d.plugin\nModule: geth\n\n## Overview\n\nThis collector monitors Go-ethereum instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nGo-ethereum can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Go-ethereum instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **geth** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 
Collectors \u2192 Jobs**, search for **geth**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/geth.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:6060/debug/metrics/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. |  | no |\n| password | Password for basic HTTP authentication. |  | no |\n| proxy_url | Proxy URL. |  | no |\n| proxy_username | Username for proxy basic HTTP authentication. |  | no |\n| proxy_password | Password for proxy basic HTTP authentication. |  | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. |  | no |\n| headers | HTTP request headers. |  | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server\'s certificates. |  | no |\n| tls_cert | Client TLS certificate. |  | no |\n| tls_key | Client TLS key. |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **geth** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the geth data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _geth_ (or scroll the list) to locate the **geth** collector.\n5. Click the **+** next to the **geth** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/geth.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/geth.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n```\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:6060/debug/metrics/prometheus\n    username: username\n    password: password\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n  - name: remote\n    url: http://192.0.2.1:6060/debug/metrics/prometheus\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `geth` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m geth\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m geth -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `geth` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep geth\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep geth /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
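A quick way to limit the output to recent activity is to combine `grep` with `tail`; for example:\n\n```bash\n# keep the 50 most recent matching lines\ngrep geth /var/log/netdata/collector.log | tail -n 50\n```\n\n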
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep geth\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Go-ethereum instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| geth.eth_db_chaindata_ancient_io_rate | reads, writes | bytes/s |\n| geth.eth_db_chaindata_ancient_io | reads, writes | bytes |\n| geth.eth_db_chaindata_disk_io | reads, writes | bytes |\n| geth.goroutines | goroutines | goroutines |\n| geth.eth_db_chaindata_disk_io_rate | reads, writes | bytes/s |\n| geth.chaindata_db_size | level_db, ancient_db | bytes |\n| geth.chainhead | block, receipt, header | block |\n| geth.tx_pool_pending | invalid, pending, local, discard, no_funds, ratelimit, replace | transactions |\n| geth.tx_pool_current | invalid, pending, local, pool | transactions |\n| geth.tx_pool_queued | discard, eviction, no_funds, ratelimit | transactions |\n| geth.p2p_bandwidth | ingress, egress | bytes/s |\n| geth.reorgs | executed | reorgs |\n| geth.reorgs_blocks | added, dropped | blocks |\n| geth.p2p_peers | peers | peers |\n| geth.p2p_peers_calls | dials, serves | calls/s |\n| geth.rpc_calls | failed, successful | calls/s |\n\n",integration_type:"collector",id:"go.d.plugin-geth-Go-ethereum",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/geth/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-haproxy",plugin_name:"go.d.plugin",module_name:"haproxy",monitored_instance:{name:"HAProxy",link:"https://www.haproxy.org/",icon_filename:"haproxy.svg",categories:["data-collection.web-servers-and-proxies"]},keywords:["haproxy","web","webserver","http","proxy"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# HAProxy\n\nPlugin: go.d.plugin\nModule: haproxy\n\n## Overview\n\nThis collector monitors HAProxy servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **haproxy** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast 
setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **haproxy**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/haproxy.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable PROMEX addon\n\nTo enable the PROMEX addon, follow the [official documentation](https://github.com/haproxy/haproxy/tree/master/addons/promex).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8404/metrics | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **haproxy** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the haproxy data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _haproxy_ (or scroll the list) to locate the **haproxy** collector.\n5. Click the **+** next to the **haproxy** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/haproxy.conf`.\n\nThe file format is YAML. 
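Before editing the file, you can verify that the PROMEX endpoint is reachable (a minimal check, assuming the default `http://127.0.0.1:8404/metrics` endpoint):\n\n```bash\ncurl -s http://127.0.0.1:8404/metrics | head -n 5\n```\n\nIf this prints Prometheus-style metric lines, the collector should be able to scrape the same URL. 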
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/haproxy.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8404/metrics\n\n```\n{% /details %}\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8404/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nHAProxy with HTTPS enabled and a self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:8404/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8404/metrics\n\n  - name: remote\n    url: http://192.0.2.1:8404/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `haproxy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m haproxy\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m haproxy -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `haproxy` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep haproxy\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep haproxy /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
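For example, to look at only the most recent lines (a sketch; adjust the line count as needed):\n\n```bash\ntail -n 500 /var/log/netdata/collector.log | grep haproxy\n```\n\n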
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep haproxy\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per HAProxy instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| haproxy.backend_current_sessions | a dimension per proxy | sessions |\n| haproxy.backend_sessions | a dimension per proxy | sessions/s |\n| haproxy.backend_response_time_average | a dimension per proxy | milliseconds |\n| haproxy.backend_queue_time_average | a dimension per proxy | milliseconds |\n| haproxy.backend_current_queue | a dimension per proxy | requests |\n\n### Per proxy\n\nThese metrics refer to the Proxy.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| haproxy.backend_http_responses | 1xx, 2xx, 3xx, 4xx, 5xx, other | responses/s |\n| haproxy.backend_network_io | in, out | bytes/s |\n\n",integration_type:"collector",id:"go.d.plugin-haproxy-HAProxy",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/haproxy/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-hddtemp",plugin_name:"go.d.plugin",module_name:"hddtemp",monitored_instance:{name:"HDD temperature",link:"https://linux.die.net/man/8/hddtemp",categories:["data-collection.storage"],icon_filename:"hard-drive.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["hardware","hdd temperature","disk temperature","temperature"]},overview:"# HDD temperature\n\nPlugin: go.d.plugin\nModule: hddtemp\n\n## Overview\n\nThis collector monitors disk temperatures.\n\n\nIt retrieves temperature data for attached disks by querying the hddtemp daemon at regular intervals.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to the `hddtemp` daemon on `127.0.0.1:7634`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **hddtemp** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                   
      | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **hddtemp**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/hddtemp.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install hddtemp\n\nInstall `hddtemp` using your distribution\'s package manager.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | address | hddtemp server address (`IP:PORT`). | 127.0.0.1:7634 | yes |\n|  | timeout | Connection, read, write, and name resolution timeout (seconds). | 1 | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **hddtemp** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the hddtemp data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _hddtemp_ (or scroll the list) to locate the **hddtemp** collector.\n5. Click the **+** next to the **hddtemp** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/hddtemp.conf`.\n\nThe file format is YAML. 
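Before editing the file, you can check that the hddtemp daemon answers on its TCP port (a minimal check, assuming the default `127.0.0.1:7634` address):\n\n```bash\nnc 127.0.0.1 7634\n```\n\nA running daemon typically prints a pipe-delimited line per disk and closes the connection; no output usually means the daemon is not listening. 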
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/hddtemp.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:7634\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:7634\n\n  - name: remote\n    address: 203.0.113.0:7634\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `hddtemp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m hddtemp\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m hddtemp -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `hddtemp` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep hddtemp\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep hddtemp /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep hddtemp\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\nThese metrics refer to the Disk.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| disk_id | Disk identifier. It is derived from the device path (e.g. sda or ata-HUP722020APA330_BFJ0WS3F) |\n| model | Disk model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hddtemp.disk_temperature | temperature | Celsius |\n| hddtemp.disk_temperature_sensor_status | ok, err, na, unk, nos, slp | status |\n\n",integration_type:"collector",id:"go.d.plugin-hddtemp-HDD_temperature",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/hddtemp/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-hfs",plugin_name:"go.d.plugin",module_name:"hfs",monitored_instance:{name:"Hadoop Distributed File System (HDFS)",link:"https://hadoop.apache.org/docs/r1.2.1/hdfs_design.html",icon_filename:"hadoop.svg",categories:["data-collection.storage"]},keywords:["hdfs","hadoop"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Hadoop Distributed File System (HDFS)\n\nPlugin: go.d.plugin\nModule: hfs\n\n## Overview\n\nThis collector monitors HDFS nodes.\n\nNetdata accesses HDFS metrics over `Java Management Extensions` (JMX) through the web interface of an HDFS daemon.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **hfs** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **hfs**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/hdfs.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). 
| 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:9870/jmx | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **hfs** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the hfs data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _hfs_ (or scroll the list) to locate the **hfs** collector.\n5. Click the **+** next to the **hfs** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/hdfs.conf`.\n\nThe file format is YAML. 
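Before editing the file, you can confirm that the JMX endpoint returns data (a minimal check, assuming the default NameNode endpoint `http://127.0.0.1:9870/jmx`):\n\n```bash\ncurl -s http://127.0.0.1:9870/jmx | head -n 20\n```\n\nA healthy endpoint responds with a JSON document containing a `beans` array. 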
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/hdfs.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9870/jmx\n\n```\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9870/jmx\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9870/jmx\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9870/jmx\n\n  - name: remote\n    url: http://192.0.2.1:9870/jmx\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `hfs` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m hfs\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m hfs -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `hfs` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep hfs\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep hfs /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
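For example, to look at only the most recent lines (a sketch; adjust the line count as needed):\n\n```bash\ntail -n 500 /var/log/netdata/collector.log | grep hfs\n```\n\n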
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep hfs\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ hdfs_capacity_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.capacity | summary datanodes space capacity utilization |\n| [ hdfs_missing_blocks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.blocks | number of missing blocks |\n| [ hdfs_stale_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes marked stale due to delayed heartbeat |\n| [ hdfs_dead_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes which are currently dead |\n| [ hdfs_num_failed_volumes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.num_failed_volumes | number of failed volumes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Hadoop Distributed File System (HDFS) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | DataNode | NameNode |\n|:------|:----------|:----|:---:|:---:|\n| hdfs.heap_memory | committed, used | MiB | \u2022 | \u2022 |\n| hdfs.gc_count_total | gc | events/s | \u2022 | \u2022 |\n| hdfs.gc_time_total | ms | ms | \u2022 | \u2022 |\n| hdfs.gc_threshold | info, warn | events/s | \u2022 | \u2022 |\n| hdfs.threads | new, runnable, blocked, waiting, timed_waiting, terminated | num | \u2022 | \u2022 |\n| hdfs.logs_total | info, error, warn, fatal | logs/s | \u2022 | \u2022 |\n| hdfs.rpc_bandwidth | received, sent | kilobits/s | \u2022 | \u2022 |\n| hdfs.rpc_calls | calls | calls/s | \u2022 | \u2022 |\n| hdfs.open_connections | open | connections | \u2022 | \u2022 |\n| hdfs.call_queue_length | length | num | \u2022 | \u2022 |\n| hdfs.avg_queue_time | time | ms | \u2022 | \u2022 |\n| hdfs.avg_processing_time | time | ms | \u2022 | \u2022 |\n| hdfs.capacity | remaining, used | KiB |   | \u2022 |\n| hdfs.used_capacity | dfs, non_dfs | KiB |   | \u2022 |\n| hdfs.load | load | load |   | \u2022 |\n| hdfs.volume_failures_total | failures | events/s |   | \u2022 |\n| hdfs.files_total | files | num |   | \u2022 |\n| hdfs.blocks_total | blocks | num |   | \u2022 |\n| hdfs.blocks | corrupt, missing, under_replicated | num |   | \u2022 |\n| hdfs.data_nodes | live, dead, stale | num |   | \u2022 |\n| hdfs.datanode_capacity | remaining, used | KiB | \u2022 |   |\n| hdfs.datanode_used_capacity | dfs, non_dfs | KiB | \u2022 |   |\n| hdfs.datanode_failed_volumes | failed volumes | num | \u2022 |   |\n| hdfs.datanode_bandwidth | reads, writes | KiB/s | \u2022 |   |\n\n",integration_type:"collector",id:"go.d.plugin-hfs-Hadoop_Distributed_File_System_(HDFS)",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/hdfs/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-hpssa",plugin_name:"go.d.plugin",module_name:"hpssa",monitored_instance:{name:"HPE Smart 
Arrays",link:"https://buy.hpe.com/us/en/options/controller-controller-options/smart-array-controllers-smart-host-bus-adapters/c/7109730",icon_filename:"hp.svg",categories:["data-collection.storage"]},keywords:["storage","raid-controller","hp","hpssa","array"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# HPE Smart Arrays\n\nPlugin: go.d.plugin\nModule: hpssa\n\n## Overview\n\nMonitors the health of HPE Smart Arrays by tracking the status of controllers, arrays, logical and physical drives in your storage system.\nIt relies on the `ssacli` CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n-  `ssacli ctrl all show config detail`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **hpssa** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **hpssa**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/ssacli.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install ssacli\n\nSee [official installation instructions](https://support.hpe.com/connect/s/softwaredetails?language=en_US&collectionId=MTX-0cb3f808e2514d3d).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | ssacli binary execution timeout. | 2 | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **hpssa** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the hpssa data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. 
The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _hpssa_ (or scroll the list) to locate the **hpssa** collector.\n5. Click the **+** next to the **hpssa** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/ssacli.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ssacli.conf\n```\n\n##### Examples\n\n###### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: hpssa\n    update_every: 5  # Collect HPE Smart Array statistics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `hpssa` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m hpssa\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m hpssa -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `hpssa` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep hpssa\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep hpssa /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
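For example, to look at only the most recent lines (a sketch; adjust the line count as needed):\n\n```bash\ntail -n 500 /var/log/netdata/collector.log | grep hpssa\n```\n\n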
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep hpssa\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per controller\n\nThese metrics refer to the Controller.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| slot | Slot number |\n| model | Controller model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.controller_status | ok, nok | status |\n| hpssa.controller_temperature | temperature | Celsius |\n| hpssa.controller_cache_module_presence_status | present, not_present | status |\n| hpssa.controller_cache_module_status | ok, nok | status |\n| hpssa.controller_cache_module_temperature | temperature | Celsius |\n| hpssa.controller_cache_module_battery_status | ok, nok | status |\n\n### Per array\n\nThese metrics refer to the Array.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| slot | Slot number |\n| array_id | Array id |\n| interface_type | Array interface type (e.g. SATA) |\n| array_type | Array type (e.g. Data) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.array_status | ok, nok | status |\n\n### Per logical drive\n\nThese metrics refer to the Logical Drive.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| slot | Slot number |\n| array_id | Array id |\n| logical_drive_id | Logical Drive id (number) |\n| disk_name | Disk name (e.g. /dev/sda) |\n| drive_type | Drive type (e.g. Data) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.logical_drive_status | ok, nok | status |\n\n### Per physical drive\n\nThese metrics refer to the Physical Drive.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| slot | Slot number |\n| array_id | Array id or "na" if unassigned |\n| logical_drive_id | Logical Drive id or "na" if unassigned |\n| location | Drive location in port:box:bay format (e.g. 1I:1:1) |\n| interface_type | Drive interface type (e.g. SATA) |\n| drive_type | Drive type (e.g. 
Data Drive, Unassigned Drive) |\n| model | Drive model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.physical_drive_status | ok, nok | status |\n| hpssa.physical_drive_temperature | temperature | Celsius |\n\n',integration_type:"collector",id:"go.d.plugin-hpssa-HPE_Smart_Arrays",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/hpssa/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-httpcheck",plugin_name:"go.d.plugin",module_name:"httpcheck",monitored_instance:{name:"HTTP Endpoints",link:"",icon_filename:"globe.svg",categories:["data-collection.synthetic-testing"]},keywords:["webserver"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# HTTP Endpoints\n\nPlugin: go.d.plugin\nModule: httpcheck\n\n## Overview\n\nThis collector monitors HTTP server availability and response time.\n\nPossible statuses:\n\n| Status | Description |\n|--------|-------------|\n| success | HTTP request completed successfully with a status code matching the configured `status_accepted` range (default: 200), and the response body and headers (if configured) match expectations. |\n| timeout | HTTP request timed out before receiving a response (default: 1 second). |\n| no_connection | Failed to establish a connection to the target. |\n| redirect | Received a redirect response (3xx status code) while `not_follow_redirects` is configured. |\n| bad_status | HTTP request completed with a status code outside the configured `status_accepted` range (default: non-200). |\n| bad_content | HTTP request completed successfully but the response body does not match the expected content (when using `response_match`). |\n| bad_header | HTTP request completed successfully but response headers do not match the expected values (when using `header_match`). 
|\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **httpcheck** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **httpcheck**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/httpcheck.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 5 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **Validation** | status_accepted | HTTP accepted response statuses. Anything else results in \'bad status\' in the status chart. | [200] | no |\n|  | response_match | If the status code is accepted, match the response body against this regular expression. |  | no |\n|  | header_match | A set of rules to check for specific key-value pairs in response headers. | [] | no |\n|  | header_match.exclude | When yes, the rule asserts the key-value pair must be absent. | no | no |\n|  | header_match.key | Exact HTTP header name to check. |  | yes |\n|  | header_match.value | The [pattern](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format) to match against the header\'s value. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | cookie_file | Path to cookie file. See [cookie file format](https://everything.curl.dev/http/cookies/fileformat). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **httpcheck** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the httpcheck data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _httpcheck_ (or scroll the list) to locate the **httpcheck** collector.\n5. Click the **+** next to the **httpcheck** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/httpcheck.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/httpcheck.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8080\n\n```\n{% /details %}\n###### With HTTP request headers\n\nConfiguration with HTTP request headers that will be sent by the client.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8080\n    headers:\n      Host: localhost:8080\n      User-Agent: netdata/go.d.plugin\n      Accept: */*\n\n```\n{% /details %}\n###### With `status_accepted`\n\nA basic example configuration with non-default status_accepted.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8080\n    status_accepted:\n      - 200\n      - 204\n\n```\n{% /details %}\n###### With `header_match`\n\nExample configurations with `header_match`. 
See the value [pattern](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format) syntax.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n    # The "X-Robots-Tag" header must be present in the HTTP response header,\n    # but the value of the header does not matter.\n    # This config checks for the presence of the header regardless of its value.\n  - name: local\n    url: http://127.0.0.1:8080\n    header_match:\n      - key: X-Robots-Tag\n\n    # The "X-Robots-Tag" header must be present in the HTTP response header\n    # and its value must be equal to "noindex,nofollow".\n    # This config checks both the presence of the header and its value.\n  - name: local\n    url: http://127.0.0.1:8080\n    header_match:\n      - key: X-Robots-Tag\n        value: \'= noindex,nofollow\'\n\n    # The "X-Robots-Tag" header must not be present in the HTTP response header,\n    # whatever its value.\n    # This config asserts the absence of the header regardless of its value.\n  - name: local\n    url: http://127.0.0.1:8080\n    header_match:\n      - key: X-Robots-Tag\n        exclude: yes\n\n    # The "X-Robots-Tag" header must not be present in the HTTP response header\n    # with the value "noindex,nofollow".\n    # This config checks both the presence of the header and its value.\n  - name: local\n    url: http://127.0.0.1:8080\n    header_match:\n      - key: X-Robots-Tag\n        exclude: yes\n        value: \'= noindex,nofollow\'\n\n```\n{% /details %}\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8080\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:8080\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8080\n\n  - name: remote\n    url: http://192.0.2.1:8080\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `httpcheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m httpcheck\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m httpcheck -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `httpcheck` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep httpcheck\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep httpcheck /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep httpcheck\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ httpcheck_web_service_up ](https://github.com/netdata/netdata/blob/master/src/health/health.d/httpcheck.conf) | httpcheck.status | HTTP check endpoint ${label:url} liveness status |\n| [ httpcheck_web_service_bad_content ](https://github.com/netdata/netdata/blob/master/src/health/health.d/httpcheck.conf) | httpcheck.status | Percentage of HTTP responses from ${label:url} with unexpected content in the last 5 minutes |\n| [ httpcheck_web_service_bad_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/httpcheck.conf) | httpcheck.status | Percentage of HTTP responses from ${label:url} with unexpected status in the last 5 minutes |\n| [ httpcheck_web_service_bad_header ](https://github.com/netdata/netdata/blob/master/src/health/health.d/httpcheck.conf) | httpcheck.status | Percentage of HTTP responses from ${label:url} with unexpected header in the last 5 minutes |\n| [ httpcheck_web_service_timeouts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/httpcheck.conf) | httpcheck.status | Percentage of timed-out HTTP requests to ${label:url} in the last 5 minutes |\n| [ httpcheck_web_service_no_connection ](https://github.com/netdata/netdata/blob/master/src/health/health.d/httpcheck.conf) | httpcheck.status | Percentage of failed HTTP requests to ${label:url} in the last 5 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per target\n\nThe metrics refer to the monitored target.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| url | url value that is set in the configuration file. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| httpcheck.response_time | time | ms |\n| httpcheck.response_length | length | characters |\n| httpcheck.status | success, timeout, redirect, no_connection, bad_content, bad_header, bad_status | boolean |\n| httpcheck.in_state | time | boolean |\n\n",integration_type:"collector",id:"go.d.plugin-httpcheck-HTTP_Endpoints",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/httpcheck/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"go.d.plugin",module_name:"icecast",monitored_instance:{name:"Icecast",link:"https://icecast.org/",categories:["data-collection.applications"],icon_filename:"icecast.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["icecast","streaming","media"]},overview:"# Icecast\n\nPlugin: go.d.plugin\nModule: icecast\n\n## Overview\n\nThis collector monitors Icecast listener counts.\n\nIt uses the Icecast server statistics `status-json.xsl` endpoint to retrieve the metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Icecast instances running on localhost that are listening on port 8000.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **icecast** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **icecast**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/icecast.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Icecast minimum version\n\nIcecast version 2.4.0 or newer is required.\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8000 | yes |\n|  | timeout | HTTP request timeout (seconds). 
| 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **icecast** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the icecast data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _icecast_ (or scroll the list) to locate the **icecast** collector.\n5. Click the **+** next to the **icecast** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/icecast.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/icecast.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8000\n\n```\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8000\n\n  - name: remote\n    url: http://192.0.2.1:8000\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `icecast` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m icecast\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m icecast -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `icecast` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep icecast\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep icecast /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep icecast\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Icecast source\n\nThese metrics refer to an icecast source.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| source | Source name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| icecast.listeners | listeners | listeners |\n\n",integration_type:"collector",id:"go.d.plugin-icecast-Icecast",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/icecast/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-intelgpu",plugin_name:"go.d.plugin",module_name:"intelgpu",monitored_instance:{name:"Intel GPU",link:"https://www.intel.com/",icon_filename:"microchip.svg",categories:["data-collection.hardware-and-sensors"]},keywords:["intel","gpu","hardware"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Intel GPU\n\nPlugin: go.d.plugin\nModule: intelgpu\n\n## Overview\n\nThis collector gathers performance metrics for Intel integrated GPUs.\nIt relies on the [`intel_gpu_top`](https://manpages.debian.org/testing/intel-gpu-tools/intel_gpu_top.1.en.html) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to grant the CAP_PERFMON capability to `intel_gpu_top`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **intelgpu** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **intelgpu**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/intelgpu.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install intel-gpu-tools\n\nInstall `intel-gpu-tools` using your distribution\'s package manager.\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 1 | no |\n| device | Select a specific GPU using [supported filter](https://manpages.debian.org/testing/intel-gpu-tools/intel_gpu_top.1.en.html#DESCRIPTION). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **intelgpu** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the intelgpu data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _intelgpu_ (or scroll the list) to locate the **intelgpu** collector.\n5. Click the **+** next to the **intelgpu** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/intelgpu.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/intelgpu.conf\n```\n\n##### Examples\n\n###### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: intelgpu\n    update_every: 5  # Collect Intel iGPU metrics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `intelgpu` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m intelgpu\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m intelgpu -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `intelgpu` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep intelgpu\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep intelgpu /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep intelgpu\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Intel GPU instance\n\nThese metrics refer to the Intel GPU.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| intelgpu.frequency | frequency | MHz |\n| intelgpu.power | gpu, package | Watts |\n\n### Per engine\n\nThese metrics refer to the GPU hardware engine.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| engine_class | Engine class (Render/3D, Blitter, VideoEnhance, Video, Compute). |\n| engine_instance | Engine instance (e.g. Render/3D/0, Video/0, Video/1). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| intelgpu.engine_busy_perc | busy | percentage |\n\n",integration_type:"collector",id:"go.d.plugin-intelgpu-Intel_GPU",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/intelgpu/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-ipfs",plugin_name:"go.d.plugin",module_name:"ipfs",monitored_instance:{name:"IPFS",link:"https://ipfs.tech/",categories:["data-collection.storage"],icon_filename:"ipfs.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ipfs","filesystem"]},overview:"# IPFS\n\nPlugin: go.d.plugin\nModule: ipfs\n\n## Overview\n\nThis collector monitors IPFS daemon health and network activity.\n\nIt uses [RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) to collect metrics.\n\nUsed endpoints:\n\n- [/api/v0/stats/bw](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-stats-bw)\n- [/api/v0/swarm/peers](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-swarm-peers)\n- [/api/v0/stats/repo](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-stats-repo)\n- [/api/v0/pin/ls](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-pin-ls)\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects IPFS instances running on localhost that are listening on port 5001.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nCalls to the following endpoints are disabled by default due to IPFS bugs:\n\n- /api/v0/stats/repo ([#7528](https://github.com/ipfs/go-ipfs/issues/7528)).\n- /api/v0/pin/ls ([#3874](https://github.com/ipfs/go-ipfs/issues/3874)).\n\n**Disabled by default** due to potential high CPU usage. Consider enabling only if necessary.\n\n",setup:'## Setup\n\n\nYou can configure the **ipfs** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **ipfs**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/ipfs.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:5001 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **Metrics Selection** | repoapi | Collect repository statistics from the [/api/v0/stats/repo](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-repo-stat) endpoint. | no | no |\n|  | pinapi | Collect pinned objects list from the [/api/v0/pin/ls](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-pin-ls) endpoint. | no | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **ipfs** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the ipfs data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _ipfs_ (or scroll the list) to locate the **ipfs** collector.\n5. Click the **+** next to the **ipfs** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/ipfs.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ipfs.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:5001\n\n```\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:5001\n\n  - name: remote\n    url: http://192.0.2.1:5001\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `ipfs` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m ipfs\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m ipfs -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `ipfs` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep ipfs\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep ipfs /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep ipfs\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ ipfs_datastore_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipfs.conf) | ipfs.datastore_space_utilization | IPFS datastore utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPFS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipfs.bandwidth | in, out | bytes/s |\n| ipfs.peers | peers | peers |\n| ipfs.datastore_space_utilization | used | percent |\n| ipfs.repo_size | size | bytes |\n| ipfs.repo_objects | objects | objects |\n| ipfs.repo_pinned_objects | pinned, recursive_pins | objects |\n\n",integration_type:"collector",id:"go.d.plugin-ipfs-IPFS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/ipfs/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-isc_dhcpd",plugin_name:"go.d.plugin",module_name:"isc_dhcpd",monitored_instance:{name:"ISC DHCP",link:"https://www.isc.org/dhcp/",categories:["data-collection.networking"],icon_filename:"isc.png"},keywords:["dhcpd","dhcp"],info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[]}}},overview:"# ISC DHCP\n\nPlugin: go.d.plugin\nModule: isc_dhcpd\n\n## Overview\n\nThis collector monitors ISC DHCP lease usage by reading the DHCP client lease database (dhcpd.leases).\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **isc_dhcpd** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **isc_dhcpd**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/isc_dhcpd.conf` and add a job.                        
                                                |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | leases_path | Path to DHCP client lease database. | /var/lib/dhcp/dhcpd.leases | no |\n|  | [pools](#option-target-pools) | DHCP IP pools to monitor. |  | yes |\n\n<a id="option-target-pools"></a>\n##### pools\n\nList of IP pools to monitor.\n\n- IP range syntax: see [supported formats](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/iprange#supported-formats).\n- Syntax:\n\n```yaml\npools:\n  - name: "POOL_NAME1"\n    networks: "SPACE SEPARATED LIST OF IP RANGES"\n  - name: "POOL_NAME2"\n    networks: "SPACE SEPARATED LIST OF IP RANGES"\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **isc_dhcpd** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the isc_dhcpd data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _isc_dhcpd_ (or scroll the list) to locate the **isc_dhcpd** collector.\n5. Click the **+** next to the **isc_dhcpd** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/isc_dhcpd.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/isc_dhcpd.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    pools:\n      - name: lan\n        networks: "192.168.0.0/24 192.168.1.0/24 192.168.2.0/24"\n      - name: wifi\n        networks: "10.0.0.0/24"\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `isc_dhcpd` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m isc_dhcpd\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m isc_dhcpd -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `isc_dhcpd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep isc_dhcpd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep isc_dhcpd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep isc_dhcpd\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ISC DHCP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| isc_dhcpd.active_leases_total | active | leases |\n\n### Per DHCP pool\n\nThese metrics refer to the DHCP pool.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| dhcp_pool_name | The DHCP pool name defined in the collector configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| isc_dhcpd.dhcp_pool_utilization | utilization | percent |\n| isc_dhcpd.dhcp_pool_active_leases | active | leases |\n\n",integration_type:"collector",id:"go.d.plugin-isc_dhcpd-ISC_DHCP",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/isc_dhcpd/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-k8s_apiserver",plugin_name:"go.d.plugin",module_name:"k8s_apiserver",monitored_instance:{name:"Kubernetes API Server",link:"https://kubernetes.io/docs/concepts/overview/components/#kube-apiserver",icon_filename:"kubernetes.svg",categories:["data-collection.containers-and-vms"]},keywords:["kubernetes","k8s","apiserver","kube-apiserver"],related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"k8s_kubelet"},{plugin_name:"go.d.plugin",module_name:"k8s_kubeproxy"},{plugin_name:"go.d.plugin",module_name:"k8s_state"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Kubernetes Containers"},{plugin_name:"go.d.plugin",module_name:"coredns"}]}},info_provided_to_referring_integrations:{description:""}},overview:'# Kubernetes API Server\n\nPlugin: go.d.plugin\nModule: k8s_apiserver\n\n## Overview\n\nThis collector monitors Kubernetes API Server health, performance, and request metrics.\n\nIt collects metrics from the kube-apiserver\'s `/metrics` endpoint, providing insights into:\n- Request rates, latencies, and error rates\n- Current inflight and long-running requests\n- Admission controller and webhook performance\n- etcd backend health and object counts\n- Controller work queue depths and latencies\n- Authentication and audit events\n- Go runtime and process metrics\n\n\nThe collector scrapes Prometheus-format metrics from the Kubernetes API Server\'s metrics endpoint.\nAuthentication is typically done using a ServiceAccount bearer token.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe ServiceAccount used must have permissions to access the `/metrics` endpoint.\nIn most clusters, this requires cluster-admin or a custom ClusterRole with metrics access.\n\n\nKubernetes API Server can be monitored further using the following other integrations:\n\n- {% relatedResource id="go.d.plugin-k8s_kubelet-Kubelet" %}Kubelet{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-k8s_kubeproxy-Kubeproxy" %}Kubeproxy{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-k8s_state-Kubernetes_Cluster_State" %}Kubernetes Cluster State{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Kubernetes_Containers" %}Kubernetes Containers{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-coredns-CoreDNS" %}CoreDNS{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen running inside a Kubernetes cluster, the collector attempts to connect to\n`https://kubernetes.default.svc:443/metrics` using the pod\'s ServiceAccount token.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **k8s_apiserver** collector in two ways:\n\n| Method                | Best for                                 
                                                | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **k8s_apiserver**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/k8s_apiserver.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### ServiceAccount with metrics access\n\nThe Netdata pod must have a ServiceAccount with permissions to read metrics from the API server.\nYou can create a ClusterRole and ClusterRoleBinding for this purpose.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | https://kubernetes.default.svc:443/metrics | yes |\n|  | timeout | HTTP request timeout (seconds). | 2 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). | /var/run/secrets/kubernetes.io/serviceaccount/token | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. | /var/run/secrets/kubernetes.io/serviceaccount/ca.crt | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **k8s_apiserver** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. 
Select the node **where you want the k8s_apiserver data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _k8s_apiserver_ (or scroll the list) to locate the **k8s_apiserver** collector.\n5. Click the **+** next to the **k8s_apiserver** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/k8s_apiserver.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_apiserver.conf\n```\n\n##### Examples\n\n###### In-cluster (default)\n\nDefault configuration when running inside a Kubernetes cluster.\n\n```yaml\njobs:\n  - name: local\n    url: https://kubernetes.default.svc:443/metrics\n    bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n    tls_ca: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n\n```\n###### External access with kubectl proxy\n\nAccess API server metrics via kubectl proxy running on localhost.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: via-proxy\n    url: http://127.0.0.1:8001/metrics\n\n```\n{% /details %}\n###### Direct access with token\n\nDirect access to API server with a bearer token.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: direct\n    url: https://api.example.com:6443/metrics\n    bearer_token_file: /path/to/token\n    tls_skip_verify: yes\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `k8s_apiserver` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m k8s_apiserver\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m k8s_apiserver -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `k8s_apiserver` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep k8s_apiserver\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep k8s_apiserver /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep k8s_apiserver\n```\n\n### Connection refused\n\nThe API server may not be accessible. Check that:\n1. The URL is correct\n2. Network policies allow access\n3. The ServiceAccount has proper RBAC permissions\n\n\n### 401 Unauthorized\n\nAuthentication failed. Verify:\n1. The bearer token file exists and is readable\n2. The token is valid and not expired\n3. The ServiceAccount has metrics access permissions\n\n\n### Certificate errors\n\nTLS verification failed. Options:\n1. Provide the correct CA certificate path in `tls_ca`\n2. Set `tls_skip_verify: yes` (not recommended for production)\n\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ k8s_apiserver_request_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/k8s_apiserver.conf) | k8s_apiserver.requests_by_code | high rate of API server request errors (5xx responses) |\n| [ k8s_apiserver_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/k8s_apiserver.conf) | k8s_apiserver.request_latency | API server request latency is high |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kubernetes API Server instance\n\nThese metrics refer to the entire monitored API server instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_apiserver.requests_total | requests | requests/s |\n| k8s_apiserver.requests_dropped | dropped | requests/s |\n| k8s_apiserver.requests_by_verb | a dimension per HTTP verb | requests/s |\n| k8s_apiserver.requests_by_code | a dimension per HTTP status code | requests/s |\n| k8s_apiserver.requests_by_resource | a dimension per Kubernetes resource | requests/s |\n| k8s_apiserver.request_latency | p50, p90, p99 | milliseconds |\n| k8s_apiserver.response_size | p50, p90, p99 | bytes |\n| k8s_apiserver.inflight_requests | mutating, read_only | requests |\n| k8s_apiserver.longrunning_requests | longrunning | requests |\n| k8s_apiserver.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |\n| k8s_apiserver.rest_client_requests_by_method | a dimension per HTTP method | requests/s |\n| k8s_apiserver.rest_client_latency | p50, p90, p99 | milliseconds |\n| k8s_apiserver.admission_step_latency | validate, admit | milliseconds |\n| k8s_apiserver.etcd_object_counts | a dimension per resource type | objects |\n| k8s_apiserver.audit_events | events, rejected | events/s |\n| k8s_apiserver.authentication_requests | authenticated | requests/s |\n| k8s_apiserver.goroutines | goroutines | goroutines |\n| k8s_apiserver.threads | threads | threads |\n| k8s_apiserver.process_memory | resident, virtual | bytes |\n| k8s_apiserver.heap_memory | alloc, inuse, stack | bytes |\n| k8s_apiserver.gc_duration | min, p25, p50, p75, max | seconds |\n| k8s_apiserver.open_fds | open, max | file descriptors |\n| k8s_apiserver.cpu_usage | cpu | seconds/s |\n\n### Per workqueue\n\nThese metrics refer to controller work queues.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| controller | Controller name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_apiserver.workqueue_depth | depth | items |\n| k8s_apiserver.workqueue_latency | p50, p90, p99 | microseconds |\n| k8s_apiserver.workqueue_adds | adds, retries | items/s |\n| k8s_apiserver.workqueue_duration | p50, p90, p99 | microseconds |\n\n### Per admission controller\n\nThese metrics refer to admission controllers.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| name | Admission controller name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_apiserver.admission_controller_latency | 5ms, 25ms, 100ms, 500ms, 1s, 2.5s, +Inf | events/s |\n\n### Per admission webhook\n\nThese metrics refer to admission webhooks.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| name | Webhook name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_apiserver.admission_webhook_latency | 5ms, 25ms, 100ms, 500ms, 1s, 2.5s, +Inf | events/s 
|\n\n",integration_type:"collector",id:"go.d.plugin-k8s_apiserver-Kubernetes_API_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/k8s_apiserver/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-k8s_kubelet",plugin_name:"go.d.plugin",module_name:"k8s_kubelet",monitored_instance:{name:"Kubelet",link:"https://kubernetes.io/docs/concepts/overview/components/#kubelet",icon_filename:"kubernetes.svg",categories:["data-collection.containers-and-vms"]},keywords:["kubelet","kubernetes","k8s"],related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"go.d.plugin",module_name:"k8s_state"},{plugin_name:"go.d.plugin",module_name:"k8s_apiserver"},{plugin_name:"go.d.plugin",module_name:"k8s_kubeproxy"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Kubernetes Containers"},{plugin_name:"go.d.plugin",module_name:"coredns"}]}},info_provided_to_referring_integrations:{description:""}},overview:'# Kubelet\n\nPlugin: go.d.plugin\nModule: k8s_kubelet\n\n## Overview\n\nThis collector monitors Kubelet instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nKubelet can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-k8s_state-Kubernetes_Cluster_State" %}Kubernetes Cluster State{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-k8s_apiserver-Kubernetes_API_Server" %}Kubernetes API Server{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-k8s_kubeproxy-Kubeproxy" %}Kubeproxy{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Kubernetes_Containers" %}Kubernetes Containers{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-coredns-CoreDNS" %}CoreDNS{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn\'t support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **k8s_kubelet** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **k8s_kubelet**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/k8s_kubelet.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:10255/metrics | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). | /var/run/secrets/kubernetes.io/serviceaccount/token | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **k8s_kubelet** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the k8s_kubelet data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _k8s_kubelet_ (or scroll the list) to locate the **k8s_kubelet** collector.\n5. Click the **+** next to the **k8s_kubelet** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/k8s_kubelet.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubelet.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:10255/metrics\n\n```\n###### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:10250/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `k8s_kubelet` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m k8s_kubelet\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m k8s_kubelet -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `k8s_kubelet` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep k8s_kubelet\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep k8s_kubelet /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep k8s_kubelet\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ kubelet_node_config_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_node_config_error | the node is experiencing a configuration-related error (0: false, 1: true) |\n| [ kubelet_token_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_token_requests | number of failed Token() requests to the alternate token source |\n| [ kubelet_operations_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_operations_errors | number of Docker or runtime operation errors |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kubelet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubelet.apiserver_audit_requests_rejected | rejected | requests/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_failures | failures | events/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_latencies | 5_\xb5s, 10_\xb5s, 20_\xb5s, 40_\xb5s, 80_\xb5s, 160_\xb5s, 320_\xb5s, 640_\xb5s, 1280_\xb5s, 2560_\xb5s, 5120_\xb5s, 10240_\xb5s, 20480_\xb5s, 40960_\xb5s, +Inf | observes/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_latencies_percent | 5_\xb5s, 10_\xb5s, 20_\xb5s, 40_\xb5s, 80_\xb5s, 160_\xb5s, 320_\xb5s, 640_\xb5s, 1280_\xb5s, 2560_\xb5s, 5120_\xb5s, 10240_\xb5s, 20480_\xb5s, 40960_\xb5s, +Inf | percentage |\n| k8s_kubelet.apiserver_storage_envelope_transformation_cache_misses | cache misses | events/s |\n| k8s_kubelet.kubelet_containers_running | total | running_containers |\n| k8s_kubelet.kubelet_pods_running | total | running_pods |\n| k8s_kubelet.kubelet_pods_log_filesystem_used_bytes | a dimension per namespace and pod | B |\n| k8s_kubelet.kubelet_runtime_operations | a dimension per operation type | operations/s |\n| k8s_kubelet.kubelet_runtime_operations_errors | a dimension per operation type | errors/s |\n| k8s_kubelet.kubelet_docker_operations | a dimension per operation type | operations/s |\n| k8s_kubelet.kubelet_docker_operations_errors | a dimension per operation type | errors/s |\n| k8s_kubelet.kubelet_node_config_error | experiencing_error | bool |\n| k8s_kubelet.kubelet_pleg_relist_interval_microseconds | 0.5, 0.9, 0.99 | microseconds |\n| k8s_kubelet.kubelet_pleg_relist_latency_microseconds | 0.5, 0.9, 0.99 | microseconds |\n| k8s_kubelet.kubelet_token_requests | total, failed | token_requests/s |\n| 
k8s_kubelet.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |\n| k8s_kubelet.rest_client_requests_by_method | a dimension per HTTP method | requests/s |\n\n### Per volume manager\n\nThese metrics refer to the Volume Manager.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubelet.volume_manager_total_volumes | actual, desired | state |\n\n",integration_type:"collector",id:"go.d.plugin-k8s_kubelet-Kubelet",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/k8s_kubelet/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-k8s_kubeproxy",plugin_name:"go.d.plugin",module_name:"k8s_kubeproxy",monitored_instance:{name:"Kubeproxy",link:"https://kubernetes.io/docs/concepts/overview/components/#kube-proxy",icon_filename:"kubernetes.svg",categories:["data-collection.containers-and-vms"]},keywords:["kubeproxy","kubernetes","k8s"],related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"go.d.plugin",module_name:"k8s_state"},{plugin_name:"go.d.plugin",module_name:"k8s_apiserver"},{plugin_name:"go.d.plugin",module_name:"k8s_kubelet"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Kubernetes Containers"},{plugin_name:"go.d.plugin",module_name:"coredns"}]}},info_provided_to_referring_integrations:{description:""}},overview:'# Kubeproxy\n\nPlugin: go.d.plugin\nModule: k8s_kubeproxy\n\n## Overview\n\nThis collector monitors Kubeproxy instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nKubeproxy can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-k8s_state-Kubernetes_Cluster_State" %}Kubernetes Cluster State{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-k8s_apiserver-Kubernetes_API_Server" %}Kubernetes API Server{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-k8s_kubelet-Kubelet" %}Kubelet{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Kubernetes_Containers" %}Kubernetes Containers{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-coredns-CoreDNS" %}CoreDNS{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn\'t support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **k8s_kubeproxy** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                   
      | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **k8s_kubeproxy**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/k8s_kubeproxy.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:10249/metrics | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **k8s_kubeproxy** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the k8s_kubeproxy data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _k8s_kubeproxy_ (or scroll the list) to locate the **k8s_kubeproxy** collector.\n5. Click the **+** next to the **k8s_kubeproxy** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/k8s_kubeproxy.conf`.\n\nThe file format is YAML. 
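As a sketch of how the two levels combine (values here are arbitrary): options named as global above, such as `update_every`, sit at the top level of the file and apply to every job, while per-job options such as `url` and `timeout` live under the individual entry in `jobs`:

```yaml
update_every: 5              # global: applies to all jobs in this file
jobs:
  - name: local              # illustrative job name
    url: http://127.0.0.1:10249/metrics
    timeout: 2               # per-job option from the table above
```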
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubeproxy.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:10249/metrics\n\n```\n###### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:10249/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `k8s_kubeproxy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m k8s_kubeproxy\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m k8s_kubeproxy -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `k8s_kubeproxy` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep k8s_kubeproxy\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep k8s_kubeproxy /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep k8s_kubeproxy\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kubeproxy instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules | sync_proxy_rules | events/s |\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency_microsecond | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | observes/s |\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | percentage |\n| k8s_kubeproxy.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |\n| k8s_kubeproxy.rest_client_requests_by_method | a dimension per HTTP method | requests/s |\n| k8s_kubeproxy.http_request_duration | 0.5, 0.9, 0.99 | microseconds |\n\n",integration_type:"collector",id:"go.d.plugin-k8s_kubeproxy-Kubeproxy",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/k8s_kubeproxy/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-k8s_state",plugin_name:"go.d.plugin",module_name:"k8s_state",monitored_instance:{name:"Kubernetes Cluster State",link:"https://kubernetes.io/",icon_filename:"kubernetes.svg",categories:["data-collection.containers-and-vms"]},keywords:["kubernetes","k8s"],related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"k8s_apiserver"},{plugin_name:"go.d.plugin",module_name:"k8s_kubelet"},{plugin_name:"go.d.plugin",module_name:"k8s_kubeproxy"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Kubernetes Containers"},{plugin_name:"go.d.plugin",module_name:"coredns"}]}},info_provided_to_referring_integrations:{description:""}},overview:'# Kubernetes Cluster State\n\nPlugin: go.d.plugin\nModule: k8s_state\n\n## Overview\n\nThis collector monitors Kubernetes Nodes, Pods and Containers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\nKubernetes Cluster State can be monitored further using the following other integrations:\n\n- {% relatedResource id="go.d.plugin-k8s_apiserver-Kubernetes_API_Server" %}Kubernetes API Server{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-k8s_kubelet-Kubelet" %}Kubelet{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-k8s_kubeproxy-Kubeproxy" %}Kubeproxy{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Kubernetes_Containers" %}Kubernetes Containers{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-coredns-CoreDNS" %}CoreDNS{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn\'t support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:"## Setup\n\n\nYou can configure the **k8s_state** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 
|\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **k8s_state**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/k8s_state.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n#### via UI\n\nConfigure the **k8s_state** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the k8s_state data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _k8s_state_ (or scroll the list) to locate the **k8s_state** collector.\n5. Click the **+** next to the **k8s_state** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/k8s_state.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_state.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `k8s_state` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m k8s_state\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m k8s_state -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `k8s_state` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep k8s_state\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep k8s_state /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep k8s_state\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ k8s_state_deployment_condition_available ](https://github.com/netdata/netdata/blob/master/src/health/health.d/k8sstate.conf) | k8s_state.deployment_conditions | Deployment ${label:k8s_deployment_name} does not have the minimum required replicas |\n| [ k8s_state_cronjob_last_execution_failed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/k8sstate.conf) | k8s_state.cronjob_last_execution_status | CronJob ${label:k8s_cronjob_name} in ${label:k8s_namespace} failing |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the Node.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.node_allocatable_cpu_requests_utilization | requests | % |\n| k8s_state.node_allocatable_cpu_requests_used | requests | millicpu |\n| k8s_state.node_allocatable_cpu_limits_utilization | limits | % |\n| k8s_state.node_allocatable_cpu_limits_used | limits | millicpu |\n| k8s_state.node_allocatable_mem_requests_utilization | requests | % |\n| k8s_state.node_allocatable_mem_requests_used | requests | bytes |\n| k8s_state.node_allocatable_mem_limits_utilization | limits | % |\n| k8s_state.node_allocatable_mem_limits_used | limits | bytes |\n| k8s_state.node_allocatable_pods_utilization | allocated | % |\n| k8s_state.node_allocatable_pods_usage | available, allocated | pods |\n| k8s_state.node_condition | Ready, DiskPressure, MemoryPressure, NetworkUnavailable, PIDPressure | status |\n| k8s_state.node_schedulability | schedulable, unschedulable | state |\n| k8s_state.node_pods_readiness | ready | % |\n| k8s_state.node_pods_readiness_state | ready, unready | pods |\n| k8s_state.node_pods_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | pods |\n| k8s_state.node_pods_phase | running, failed, succeeded, pending | pods |\n| k8s_state.node_containers | containers, init_containers | containers |\n| k8s_state.node_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_init_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_age | age | seconds |\n\n### Per deployment\n\nThese metrics refer to Deployments.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_deployment_name | Deployment name. |\n| k8s_namespace | Namespace. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.deployment_conditions | available, replica_failure, progressing | status |\n| k8s_state.deployment_replicas | desired, current, ready | replicas |\n| k8s_state.deployment_age | age | seconds |\n\n### Per cronjob\n\nThese metrics refer to CronJobs.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_cronjob_name | CronJob name. |\n| k8s_namespace | Namespace. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.cronjob_jobs_count_by_status | completed, failed, running, suspended | jobs |\n| k8s_state.cronjob_jobs_failed_by_reason | pod_failure_policy, backoff_limit_exceeded, deadline_exceeded | jobs |\n| k8s_state.cronjob_last_execution_status | completed, failed | status |\n| k8s_state.cronjob_last_completion_duration | last_completion | seconds |\n| k8s_state.cronjob_last_completed_time_ago | last_completed_ago | seconds |\n| k8s_state.cronjob_last_schedule_time_ago | last_schedule_ago | seconds |\n| k8s_state.cronjob_suspend_status | enabled, suspended | status |\n| k8s_state.cronjob_age | age | seconds |\n\n### Per pod\n\nThese metrics refer to the Pod.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. 
|\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_cpu_requests_used | requests | millicpu |\n| k8s_state.pod_cpu_limits_used | limits | millicpu |\n| k8s_state.pod_mem_requests_used | requests | bytes |\n| k8s_state.pod_mem_limits_used | limits | bytes |\n| k8s_state.pod_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | state |\n| k8s_state.pod_phase | running, failed, succeeded, pending | state |\n| k8s_state.pod_status_reason | Evicted, NodeAffinity, NodeLost, Shutdown, UnexpectedAdmissionError, Other | status |\n| k8s_state.pod_age | age | seconds |\n| k8s_state.pod_containers | containers, init_containers | containers |\n| k8s_state.pod_containers_state | running, waiting, terminated | containers |\n| k8s_state.pod_init_containers_state | running, waiting, terminated | containers |\n\n### Per container\n\nThese metrics refer to the Pod container.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). |\n| k8s_container_name | Container name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_container_readiness_state | ready | state |\n| k8s_state.pod_container_restarts | restarts | restarts |\n| k8s_state.pod_container_state | running, waiting, terminated | state |\n| k8s_state.pod_container_waiting_state_reason | ContainerCreating, CrashLoopBackOff, CreateContainerConfigError, CreateContainerError, ErrImagePull, ImagePullBackOff, InvalidImageName, PodInitializing, Other | state |\n| k8s_state.pod_container_terminated_state_reason | Completed, ContainerCannotRun, DeadlineExceeded, Error, Evicted, OOMKilled, Other | state |\n\n",integration_type:"collector",id:"go.d.plugin-k8s_state-Kubernetes_Cluster_State",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/k8s_state/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-lighttpd",plugin_name:"go.d.plugin",module_name:"lighttpd",monitored_instance:{name:"Lighttpd",link:"https://www.lighttpd.net/",icon_filename:"lighttpd.svg",categories:["data-collection.web-servers-and-proxies"]},keywords:["webserver"],related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"web_log"},{plugin_name:"go.d.plugin",module_name:"httpcheck"},{plugin_name:"apps.plugin",module_name:"apps"}]}},info_provided_to_referring_integrations:{description:""}},overview:'# Lighttpd\n\nPlugin: go.d.plugin\nModule: lighttpd\n\n## Overview\n\nThis collector monitors the activity and performance of Lighttpd servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Lighttpd location [server-status](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status), \nwhich is a built-in location that provides metrics about the Lighttpd server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nLighttpd can be monitored further using the following other integrations:\n\n- {% relatedResource id="go.d.plugin-web_log-Web_server_log_files" %}Web server log files{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-httpcheck-HTTP_Endpoints" %}HTTP Endpoints{% /relatedResource %}\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Lighttpd instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **lighttpd** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)    
 | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **lighttpd**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/lighttpd.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable Lighttpd status support\n\nTo enable status support, see the [official documentation](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1/server-status?auto | yes |\n|  | timeout | HTTP request timeout (seconds). | 2 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **lighttpd** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the lighttpd data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _lighttpd_ (or scroll the list) to locate the **lighttpd** collector.\n5. Click the **+** next to the **lighttpd** collector to add a new job.\n6. 
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/lighttpd.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/lighttpd.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/server-status?auto\n\n```\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/server-status?auto\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nLighttpd with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1/server-status?auto\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/server-status?auto\n\n  - name: remote\n    url: http://192.0.2.1/server-status?auto\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `lighttpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m lighttpd\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m lighttpd -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `lighttpd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep lighttpd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep lighttpd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep lighttpd\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Lighttpd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| lighttpd.requests | requests | requests/s |\n| lighttpd.net | sent | kilobits/s |\n| lighttpd.workers | idle, busy | servers |\n| lighttpd.scoreboard | waiting, open, close, hard_error, keepalive, read, read_post, write, handle_request, request_start, request_end | connections |\n| lighttpd.uptime | uptime | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-lighttpd-Lighttpd",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/lighttpd/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-litespeed",plugin_name:"go.d.plugin",module_name:"litespeed",monitored_instance:{name:"Litespeed",link:"https://www.litespeedtech.com/products/litespeed-web-server",categories:["data-collection.web-servers-and-proxies"],icon_filename:"litespeed.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["litespeed","web","server"]},overview:"# Litespeed\n\nPlugin: go.d.plugin\nModule: litespeed\n\n## Overview\n\nExamine Litespeed metrics for insights into web server operations. 
Analyze request rates, response times, and error rates for efficient web service delivery.\n\nThe collector uses the statistics under /tmp/lshttpd to gather the metrics.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is present, the collector will attempt to read files under /tmp/lshttpd/.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\nYou can configure the **litespeed** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **litespeed**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/litespeed.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| reports_dir | Directory containing Litespeed's real-time statistics files. | /tmp/lshttpd/ | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **litespeed** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the litespeed data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _litespeed_ (or scroll the list) to locate the **litespeed** collector.\n5. Click the **+** next to the **litespeed** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/litespeed.conf`.\n\nThe file format is YAML. 
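Because this collector reads report files from disk rather than querying an HTTP endpoint, it can be worth confirming those files exist before writing a job. A quick manual check, assuming the default directory from the options table and Litespeed's usual `.rtreport*` naming for its real-time report files:

```bash
# list the real-time report files the collector would read
ls -l /tmp/lshttpd/.rtreport*
```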
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/litespeed.conf\n```\n\n##### Examples\n\n###### Set the path to statistics\n\nChange the directory containing Litespeed's real-time statistics files with the `reports_dir` option.\n\n```yaml\njobs:\n  - name: local\n    reports_dir: /tmp/lshttpd/\n\n```\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `litespeed` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m litespeed\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m litespeed -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `litespeed` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep litespeed\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep litespeed /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep litespeed\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Litespeed instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| litespeed.requests | requests | requests/s |\n| litespeed.requests_processing | processing | requests |\n| litespeed.net_throughput | in, out | kilobits/s |\n| litespeed.net_ssl_throughput | in, out | kilobits/s |\n| litespeed.connections | free, used | conns |\n| litespeed.ssl_connections | free, used | conns |\n| litespeed.public_cache | hits | hits/s |\n| litespeed.private_cache | hits | hits/s |\n| litespeed.static | hits | hits/s |\n\n",integration_type:"collector",id:"go.d.plugin-litespeed-Litespeed",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/litespeed/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-logind",plugin_name:"go.d.plugin",module_name:"logind",monitored_instance:{name:"systemd-logind users",link:"https://www.freedesktop.org/software/systemd/man/systemd-logind.service.html",icon_filename:"users.svg",categories:["data-collection.operating-systems"]},keywords:["logind","systemd"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# systemd-logind users\n\nPlugin: go.d.plugin\nModule: logind\n\n## Overview\n\nThis collector monitors number of sessions and users as reported by the `org.freedesktop.login1` DBus API.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\nYou can configure the **logind** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **logind**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/logind.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n\n#### via UI\n\nConfigure the **logind** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. 
Select the node **where you want the logind data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _logind_ (or scroll the list) to locate the **logind** collector.\n5. Click the **+** next to the **logind** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/logind.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logind.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `logind` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m logind\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m logind -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `logind` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep logind\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep logind /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep logind\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per systemd-logind users instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logind.sessions | remote, local | sessions |\n| logind.sessions_type | console, graphical, other | sessions |\n| logind.sessions_state | online, closing, active | sessions |\n| logind.users_state | offline, closing, online, lingering, active | users |\n\n",integration_type:"collector",id:"go.d.plugin-logind-systemd-logind_users",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/logind/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-logstash",plugin_name:"go.d.plugin",module_name:"logstash",monitored_instance:{name:"Logstash",link:"https://www.elastic.co/products/logstash",icon_filename:"elastic-logstash.svg",categories:["data-collection.applications"]},keywords:["logstash"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Logstash\n\nPlugin: go.d.plugin\nModule: logstash\n\n## Overview\n\nThis collector monitors Logstash instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **logstash** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **logstash**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/logstash.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://localhost:9600 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **logstash** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the logstash data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _logstash_ (or scroll the list) to locate the **logstash** collector.\n5. Click the **+** next to the **logstash** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/logstash.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logstash.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://localhost:9600\n\n```\n{% /details %}\n###### HTTP authentication\n\nHTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://localhost:9600\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nHTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://localhost:9600\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://localhost:9600\n\n  - name: remote\n    url: http://192.0.2.1:9600\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `logstash` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m logstash\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m logstash -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `logstash` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep logstash\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep logstash /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
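\n\nTo zero in on a single job, chain a second `grep` with the job name (this assumes the job name appears in the log message, which is typical for go.d log lines; replace jobName with your own):\n\n```bash\ngrep logstash /var/log/netdata/collector.log | grep jobName\n```\n\n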
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep logstash\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Logstash instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logstash.jvm_threads | threads | count |\n| logstash.jvm_mem_heap_used | in_use | percentage |\n| logstash.jvm_mem_heap | committed, used | KiB |\n| logstash.jvm_mem_pools_eden | committed, used | KiB |\n| logstash.jvm_mem_pools_survivor | committed, used | KiB |\n| logstash.jvm_mem_pools_old | committed, used | KiB |\n| logstash.jvm_gc_collector_count | eden, old | counts/s |\n| logstash.jvm_gc_collector_time | eden, old | ms |\n| logstash.open_file_descriptors | open | fd |\n| logstash.event | in, filtered, out | events/s |\n| logstash.event_duration | event, queue | seconds |\n| logstash.uptime | uptime | seconds |\n\n### Per pipeline\n\nThese metrics refer to the pipeline.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| pipeline | pipeline name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logstash.pipeline_event | in, filtered, out | events/s |\n| logstash.pipeline_event_duration | event, queue | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-logstash-Logstash",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/logstash/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-lvm",plugin_name:"go.d.plugin",module_name:"lvm",monitored_instance:{name:"LVM logical volumes",link:"",icon_filename:"filesystem.svg",categories:["data-collection.storage"]},keywords:["lvm","lvs"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# LVM logical volumes\n\nPlugin: go.d.plugin\nModule: lvm\n\n## Overview\n\nThis collector monitors the health of LVM logical volumes. It relies on the [`lvs`](https://man7.org/linux/man-pages/man8/lvs.8.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. 
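\n\nConceptually, the plugin asks the `ndsudo` helper to run one command from a fixed, built-in allowlist on its behalf, rather than being granted blanket root access. A rough sketch of the equivalent shell invocation follows; the allowlisted command name here is illustrative only, an internal detail rather than a documented interface:\n\n```bash\n# run from the Netdata plugins directory as the netdata user;\n# ndsudo refuses anything not on its built-in allowlist\ncd /usr/libexec/netdata/plugins.d/\n./ndsudo lvs-report-json\n```\n\n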
This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- NetBSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **lvm** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **lvm**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/lvm.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | lvs binary execution timeout. | 2 | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **lvm** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the lvm data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _lvm_ (or scroll the list) to locate the **lvm** collector.\n5. Click the **+** next to the **lvm** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/lvm.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/lvm.conf\n```\n\n##### Examples\n\n###### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: lvm\n    update_every: 5  # Collect logical volume statistics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `lvm` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m lvm\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m lvm -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `lvm` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep lvm\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep lvm /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
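\n\nFor example, to check whether the collector has logged any errors recently (plain shell):\n\n```bash\ngrep lvm /var/log/netdata/collector.log | grep -i error | tail -n 20\n```\n\n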
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep lvm\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ lvm_lv_data_space_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/lvm.conf) | lvm.lv_data_space_utilization | LVM logical volume high data space usage (LV ${label:lv_name} VG ${label:vg_name} Type ${label:volume_type}) |\n| [ lvm_lv_metadata_space_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/lvm.conf) | lvm.lv_metadata_space_utilization | LVM logical volume high metadata space usage (LV ${label:lv_name} VG ${label:vg_name} Type ${label:volume_type}) |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per logical volume\n\nThese metrics refer to the LVM logical volume.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| lv_name | Logical volume name |\n| vg_name | Volume group name |\n| volume_type | Type of the volume |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| lvm.lv_data_space_utilization | utilization | % |\n| lvm.lv_metadata_space_utilization | utilization | % |\n\n",integration_type:"collector",id:"go.d.plugin-lvm-LVM_logical_volumes",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/lvm/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-maxscale",plugin_name:"go.d.plugin",module_name:"maxscale",monitored_instance:{name:"MaxScale",link:"https://mariadb.com/kb/en/maxscale/",categories:["data-collection.databases"],icon_filename:"maxscale.svg"},related_resources:{integrations:{list:[]}},alternative_monitored_instances:[],info_provided_to_referring_integrations:{description:""},keywords:["maria","mariadb","maxscale","database","db"]},overview:"# MaxScale\n\nPlugin: go.d.plugin\nModule: maxscale\n\n## Overview\n\nThis collector monitors the activity and performance of MaxScale servers.\n\n\nIt sends HTTP requests to the MaxScale [REST API](https://mariadb.com/kb/en/maxscale-24-02rest-api/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector can automatically detect MaxScale instances running on:\n\n- localhost that are listening on port 8989\n- within Docker containers\n\n> **Note that the MaxScale REST API requires a username and password**. 
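\n> For example, replace the auto-created job with an explicit one that carries credentials (these are the MaxScale defaults listed in the Options table below):\n>\n> ```yaml\n> jobs:\n>   - name: local\n>     url: http://127.0.0.1:8989\n>     username: admin\n>     password: mariadb\n> ```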
\n> While Netdata can automatically detect MaxScale instances and create data collection jobs, these jobs will fail unless you provide the necessary credentials.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **maxscale** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **maxscale**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/maxscale.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8989 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. | admin | no |\n|  | password | Password for Basic HTTP authentication. | mariadb | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). 
| no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **maxscale** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the maxscale data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _maxscale_ (or scroll the list) to locate the **maxscale** collector.\n5. Click the **+** next to the **maxscale** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/maxscale.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/maxscale.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8989\n    username: admin\n    password: mariadb\n\n```\n###### HTTPS with self-signed certificate\n\nMaxScale with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8989\n    username: admin\n    password: mariadb\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8989\n    username: admin\n    password: mariadb\n\n  - name: remote\n    url: http://192.0.2.1:8989\n    username: admin\n    password: mariadb\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `maxscale` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m maxscale\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m maxscale -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `maxscale` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep maxscale\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep maxscale /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep maxscale\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per MaxScale instance\n\nThese metrics refer to the monitored MaxScale instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| maxscale.poll_events | reads, writes, accepts, errors, hangups | events/s |\n| maxscale.current_sessions | sessions | sessions |\n| maxscale.current_zombie_connections | zombie | connections |\n| maxscale.threads_by_state | active, draining, dormant | threads |\n| maxscale.current_fds | managed | fds |\n| maxscale.qc_cache_efficiency | hits, misses | requests/s |\n| maxscale.qc_cache_operations | inserts, evictions | operations/s |\n| maxscale.uptime | uptime | seconds |\n\n### Per server\n\nThese metrics refer to the MariaDB server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| server | Server ID. |\n| address | Server address. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| maxscale.server_state | master, slave, running, down, maintenance, draining, drained, relay_master, binlog_relay, synced | state |\n| maxscale.server_current_connections | connections | connections |\n\n",integration_type:"collector",id:"go.d.plugin-maxscale-MaxScale",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/maxscale/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-megacli",plugin_name:"go.d.plugin",module_name:"megacli",monitored_instance:{name:"MegaCLI MegaRAID",link:"https://wikitech.wikimedia.org/wiki/MegaCli",icon_filename:"hard-drive.svg",categories:["data-collection.storage"]},keywords:["storage","raid-controller","manage-disks"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# MegaCLI MegaRAID\n\nPlugin: go.d.plugin\nModule: megacli\n\n## Overview\n\nMonitors the health of MegaCLI Hardware RAID by tracking the status of RAID adapters, physical drives, and backup batteries in your storage system.\nIt relies on the `megacli` CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n-  `megacli -LDPDInfo -aAll -NoLog`\n-  `megacli -AdpBbuCmd -aAll -NoLog`\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **megacli** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **megacli**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/megacli.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | megacli binary execution timeout. | 2 | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **megacli** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the megacli data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _megacli_ (or scroll the list) to locate the **megacli** collector.\n5. Click the **+** next to the **megacli** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/megacli.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/megacli.conf\n```\n\n##### Examples\n\n###### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: megacli\n    update_every: 5  # Collect MegaCli Hardware RAID statistics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `megacli` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m megacli\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m megacli -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `megacli` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep megacli\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep megacli /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep megacli\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ megacli_adapter_health_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.adapter_health_state | MegaCLI adapter ${label:adapter_number} is in the degraded state |\n| [ megacli_phys_drive_media_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.phys_drive_media_errors | MegaCLI physical drive adapter ${label:adapter_number} slot ${label:slot_number} media errors |\n| [ megacli_phys_drive_predictive_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.phys_drive_predictive_failures | MegaCLI physical drive (adapter ${label:adapter_number} slot ${label:slot_number}) predictive failures |\n| [ megacli_bbu_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.bbu_charge | MegaCLI Backup Battery Unit (adapter ${label:adapter_number}) average charge over the last minute |\n| [ megacli_bbu_recharge_cycles ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.bbu_recharge_cycles | MegaCLI Backup Battery Unit (adapter ${label:adapter_number}) average number of charge cycles over the last minute |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per adapter\n\nThese metrics refer to the MegaCLI Adapter.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| adapter_number | Adapter number |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.adapter_health_state | optimal, degraded, partially_degraded, failed | state |\n\n### Per physical drive\n\nThese metrics refer to the MegaCLI Physical Drive.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| adapter_number | Adapter number |\n| wwn | World Wide Name |\n| slot_number | Slot number |\n| drive_position | Position (e.g. DiskGroup: 0, Span: 0, Arm: 2) |\n| drive_type | Type (e.g. SATA) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.phys_drive_media_errors_rate | media_errors | errors/s |\n| megacli.phys_drive_predictive_failures_rate | predictive_failures | failures/s |\n\n### Per backup battery unit\n\nThese metrics refer to the MegaCLI Backup Battery Unit.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| adapter_number | Adapter number |\n| battery_type | Battery type (e.g. BBU) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.bbu_charge | charge | percentage |\n| megacli.bbu_recharge_cycles | recharge | cycles |\n| megacli.bbu_capacity_degradation | cap_degradation | percent |\n| megacli.bbu_temperature | temperature | Celsius |\n\n",integration_type:"collector",id:"go.d.plugin-megacli-MegaCLI_MegaRAID",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/megacli/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-memcached",plugin_name:"go.d.plugin",module_name:"memcached",monitored_instance:{name:"Memcached",link:"https://memcached.org/",categories:["data-collection.databases"],icon_filename:"memcached.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["memcached","memcache","cache","database"]},overview:"# Memcached\n\nPlugin: go.d.plugin\nModule: memcached\n\n## Overview\n\nMonitor Memcached metrics for proficient in-memory key-value store operations. 
Track cache hits, misses, and memory usage for efficient data caching.\n\nIt reads the server's response to the `stats` command.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, collector will attempt to connect to memcached instance on `127.0.0.1:11211` address.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **memcached** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **memcached**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/memcached.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### UNIX socket access\n\nIf you are connecting to Memcached via its UNIX socket, ensure that the `netdata` user\nhas permission to access it. You can do this by:\n\n1. Adding the `netdata` user to the `memcache` group.\n   ```bash\n   sudo gpasswd -a netdata memcache\n   ```\n2. Configuring the socket permissions in `/etc/memcached.conf` so the group has access (e.g. `-a 770`).\n\n**Note**: You may need to restart Memcached after making these changes for them to take effect.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | address | Memcached server address (`IP:PORT`). | 127.0.0.1:11211 | yes |\n|  | timeout | Connection, read, write, and name resolution timeout (seconds). | 1 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **memcached** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the memcached data-collection job to run** and click the :gear: (**Configure this node**). 
That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _memcached_ (or scroll the list) to locate the **memcached** collector.\n5. Click the **+** next to the **memcached** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/memcached.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/memcached.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:11211\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:11211\n\n  - name: remote\n    address: 203.0.113.0:11211\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `memcached` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m memcached\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m memcached -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `memcached` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep memcached\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep memcached /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep memcached\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ memcached_cache_memory_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | cache memory utilization |\n| [ memcached_cache_fill_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | average rate the cache fills up (positive), or frees up (negative) space over the last hour |\n| [ memcached_out_of_cache_space_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | estimated time the cache will run out of space if the system continues to add data at the same rate as the past hour |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memcached instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| memcached.cache | available, used | MiB |\n| memcached.net | in, out | kilobits/s |\n| memcached.connections | current, rejected, total | connections/s |\n| memcached.items | current, total | items |\n| memcached.evicted_reclaimed | reclaimed, evicted | items |\n| memcached.get | hits, misses | requests |\n| memcached.get_rate | rate | requests/s |\n| memcached.set_rate | rate | requests/s |\n| memcached.delete | hits, misses | requests |\n| memcached.cas | hits, misses, bad value | requests |\n| memcached.increment | hits, misses | requests |\n| memcached.decrement | hits, misses | requests |\n| memcached.touch | hits, misses | requests |\n| memcached.touch_rate | rate | requests/s |\n\n",integration_type:"collector",id:"go.d.plugin-memcached-Memcached",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/memcached/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-mongodb",plugin_name:"go.d.plugin",module_name:"mongodb",monitored_instance:{name:"MongoDB",link:"https://www.mongodb.com/",icon_filename:"mongodb.svg",categories:["data-collection.databases"]},keywords:["mongodb","databases"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# MongoDB\n\nPlugin: go.d.plugin\nModule: mongodb\n\n## Overview\n\nThis collector monitors MongoDB servers.\n\nExecuted queries:\n\n- [serverStatus](https://docs.mongodb.com/manual/reference/command/serverStatus/)\n- [dbStats](https://docs.mongodb.com/manual/reference/command/dbStats/)\n- [replSetGetStatus](https://www.mongodb.com/docs/manual/reference/command/replSetGetStatus/)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **mongodb** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **mongodb**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/mongodb.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Create a read-only user\n\nCreate a read-only user for Netdata in the admin database.\n\n- Authenticate as the admin user:\n\n  ```bash\n  use admin\n  db.auth("admin", "<MONGODB_ADMIN_PASSWORD>")\n  ```\n\n- Create a user:\n\n  ```bash\n  db.createUser({\n  "user":"netdata",\n  "pwd": "<UNIQUE_PASSWORD>",\n  "roles" : [\n  {role: \'read\', db: \'admin\' },\n  {role: \'clusterMonitor\', db: \'admin\'},\n  {role: \'read\', db: \'local\' }\n  ]\n  })\n  ```\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 5 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | uri | MongoDB connection string. See [URI syntax](https://www.mongodb.com/docs/manual/reference/connection-string/). | mongodb://localhost:27017 | yes |\n|  | timeout | Query timeout (seconds). | 1 | no |\n| **Filters** | databases | Database selector. Defines which databases to collect metrics from. |  | no |\n| **Functions** | functions.top_queries.disabled | Disable the [top-queries](#top-queries) function. | no | no |\n|  | functions.top_queries.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.top_queries.limit | Maximum number of queries to return. | 500 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **mongodb** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the mongodb data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _mongodb_ (or scroll the list) to locate the **mongodb** collector.\n5. Click the **+** next to the **mongodb** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/mongodb.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mongodb.conf\n```\n\n##### Examples\n\n###### TCP socket\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    uri: mongodb://netdata:password@localhost:27017\n\n```\n{% /details %}\n###### With databases metrics\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    uri: mongodb://netdata:password@localhost:27017\n    databases:\n      includes:\n        - "* *"\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    uri: mongodb://netdata:password@localhost:27017\n\n  - name: remote\n    uri: mongodb://netdata:password@203.0.113.0:27017\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `mongodb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m mongodb\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m mongodb -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `mongodb` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep mongodb\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep mongodb /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
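\n\nYou can also follow the log live while reproducing the problem:\n\n```bash\ntail -f /var/log/netdata/collector.log | grep mongodb\n```\n\n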
\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep mongodb\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",functions:"## Functions\n\nThis collector exposes real-time functions for interactive troubleshooting in the Live tab.\n\n\n### Top Queries\n\nRetrieves profiled query statistics from the MongoDB [system.profile](https://www.mongodb.com/docs/manual/reference/database-profiler/) collection.\n\nThis function queries the `system.profile` collection across all user databases (excluding admin, local, config) to retrieve slow or sampled queries captured by the MongoDB profiler. It provides detailed execution metrics including timing, document counts, and execution plan information.\n\nUse cases:\n- Identify slow queries that exceed the profiling threshold\n- Analyze query patterns by examining docs examined vs docs returned ratios\n- Detect collection scans (COLLSCAN) that may need index optimization\n\nQuery text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Mongodb:top-queries` |\n| Require Cloud | yes |\n| Performance | Reads from the `system.profile` collection across all user databases:<br/>\u2022 Profiling itself adds overhead to MongoDB operations (typically 1-5%)<br/>\u2022 Default limit of 500 rows balances usefulness with performance |\n| Security | Query text may contain unmasked literal values including potentially sensitive data:<br/>\u2022 Document field values in query filters<br/>\u2022 Personal information in inserted/updated documents<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to MongoDB<br/>\u2022 Profiling is enabled on at least one user database<br/>\u2022 Returns HTTP 503 if collector is still initializing or profiling is disabled on all databases<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Enable MongoDB profiling\n\nDatabase profiling must be enabled on each database you want to monitor, and the function must not be disabled in the collector configuration.\n\n1. Enable profiling on a database (profile slow queries > 100ms):\n\n   ```javascript\n   use myDatabase\n   db.setProfilingLevel(1, { slowms: 100 })\n   ```\n\n2. Alternatively, profile all operations (level 2, use with caution):\n\n   ```javascript\n   db.setProfilingLevel(2)\n   ```\n\n3. Verify profiling status:\n\n   ```javascript\n   db.getProfilingStatus()\n   ```\n\n4. Make sure the function is enabled in the Netdata collector config (it is enabled by default; it is controlled by `functions.top_queries.disabled`):\n\n   ```yaml\n   jobs:\n     - name: local\n       uri: mongodb://localhost:27017\n       functions:\n         top_queries:\n           disabled: no\n   ```\n\n:::info\n\n- Profiling level 0 = off, 1 = slow operations only, 2 = all operations\n- The `slowms` threshold determines which queries are captured at level 1\n- `system.profile` is a capped collection; old entries are automatically removed\n- System databases (admin, local, config) are excluded from profiling queries\n\n:::\n\n\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. 
Options include execution time, docs examined, keys examined, and more. Defaults to execution time to focus on slowest queries. | yes | execution_time |  |\n\n#### Returns\n\nProfiled query statistics from `system.profile`. Each row represents a single profiled operation with execution metrics and plan details.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Timestamp | timestamp |  |  | When the operation was profiled. Useful for correlating slow queries with application events. |\n| Namespace | string |  |  | Database and collection name in format `database.collection`. Identifies which collection the operation targeted. |\n| Operation | string |  |  | Type of operation: query, insert, update, remove, command, getmore. Helps categorize workload patterns. |\n| Query | string |  |  | The command document as JSON showing the query filter, projection, and options. Truncated to 4096 characters. |\n| Execution Time | duration | seconds |  | Total execution time of the operation. High values indicate slow queries that may need optimization. |\n| Docs Examined | integer |  |  | Number of documents scanned during execution. A high ratio of docs examined to docs returned suggests missing or inefficient indexes. |\n| Keys Examined | integer |  |  | Number of index keys scanned. Compare with docs examined to assess index efficiency. |\n| Docs Returned | integer |  |  | Number of documents returned to the client. Compare with docs examined to identify inefficient queries. |\n| Plan Summary | string |  |  | Execution plan summary (e.g., IXSCAN, COLLSCAN, SORT). COLLSCAN indicates a full collection scan that may need an index. |\n| Client | string |  |  | Client IP address or hostname that executed the operation. Useful for identifying query sources. |\n| User | string |  |  | Authenticated user who executed the operation. Empty for unauthenticated connections. |\n| Docs Deleted | integer |  | hidden | Number of documents deleted by the operation. Relevant for remove operations. |\n| Docs Inserted | integer |  | hidden | Number of documents inserted by the operation. Relevant for insert operations. |\n| Docs Modified | integer |  | hidden | Number of documents modified by the operation. Relevant for update operations. |\n| Response Length | integer |  | hidden | Size of the response in bytes. Large responses may indicate queries returning excessive data. |\n| Num Yield | integer |  | hidden | Number of times the operation yielded to allow other operations to proceed. High yields may indicate lock contention. |\n| App Name | string |  |  | Application name from the client connection string. Useful for identifying which application generated the query. |\n| Cursor Exhausted | string |  | hidden | Whether the cursor was fully exhausted (Yes/No). |\n| Has Sort Stage | string |  | hidden | Whether the query required an in-memory sort stage (Yes/No). In-memory sorts are slower than index-based sorts. |\n| Uses Disk | string |  | hidden | Whether the operation used disk for sorting or aggregation (Yes/No). Indicates memory pressure. |\n| From Multi Planner | string |  | hidden | Whether multiple query plans were evaluated (Yes/No). |\n| Replanned | string |  | hidden | Whether the query was replanned due to plan cache eviction (Yes/No). |\n| Query Hash | string |  | hidden | Hash of the query shape for identifying similar queries. Available in MongoDB 4.2+. |\n| Plan Cache Key | string |  | hidden | Key used for plan cache lookup. 
Available in MongoDB 4.2+. |\n| Planning Time | duration | seconds | hidden | Time spent planning the query execution. Available in MongoDB 6.2+. |\n| CPU Time | duration | seconds | hidden | CPU time consumed by the operation. Available in MongoDB 6.3+ on Linux only. |\n| Query Framework | string |  | hidden | Query execution framework used (classic or SBE). Available in MongoDB 7.0+. |\n| Query Shape Hash | string |  | hidden | Hash representing the query shape for grouping similar queries. Available in MongoDB 8.0+. |\n\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n- WiredTiger metrics are available only if [WiredTiger](https://docs.mongodb.com/v6.0/core/wiredtiger/) is used as the\nstorage engine.\n- Sharding metrics are available only for [mongos](https://www.mongodb.com/docs/manual/reference/program/mongos/).\n\n\n### Per MongoDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.operations_rate | reads, writes, commands | operations/s |\n| mongodb.operations_latency_time | reads, writes, commands | milliseconds |\n| mongodb.operations_by_type_rate | insert, query, update, delete, getmore, command | operations/s |\n| mongodb.document_operations_rate | inserted, deleted, returned, updated | operations/s |\n| mongodb.scanned_indexes_rate | scanned | indexes/s |\n| mongodb.scanned_documents_rate | scanned | documents/s |\n| mongodb.active_clients_count | readers, writers | clients |\n| mongodb.queued_operations_count | reads, writes | operations |\n| mongodb.cursors_open_count | open | cursors |\n| mongodb.cursors_open_no_timeout_count | open_no_timeout | cursors |\n| mongodb.cursors_opened_rate | opened | cursors/s |\n| mongodb.cursors_timed_out_rate | timed_out | cursors/s |\n| mongodb.cursors_by_lifespan_count | le_1s, 1s_5s, 5s_15s, 15s_30s, 30s_1m, 1m_10m, ge_10m | cursors |\n| mongodb.transactions_count | active, inactive, open, prepared | transactions |\n| mongodb.transactions_rate | started, aborted, committed, prepared | transactions/s |\n| mongodb.connections_usage | available, used | connections |\n| mongodb.connections_by_state_count | active, threaded, exhaust_is_master, exhaust_hello, awaiting_topology_changes | connections |\n| mongodb.connections_rate | created | connections/s |\n| mongodb.asserts_rate | regular, warning, msg, user, tripwire, rollovers | asserts/s |\n| mongodb.network_traffic_rate | in, out | bytes/s |\n| mongodb.network_requests_rate | requests | requests/s |\n| mongodb.network_slow_dns_resolutions_rate | slow_dns | resolutions/s |\n| mongodb.network_slow_ssl_handshakes_rate | slow_ssl | handshakes/s |\n| mongodb.memory_resident_size | used | bytes |\n| mongodb.memory_virtual_size | used | bytes |\n| mongodb.memory_page_faults_rate | pgfaults | pgfaults/s |\n| mongodb.memory_tcmalloc_stats | allocated, central_cache_freelist, transfer_cache_freelist, thread_cache_freelists, pageheap_freelist, pageheap_unmapped | bytes |\n| mongodb.wiredtiger_concurrent_read_transactions_usage | available, used | transactions |\n| mongodb.wiredtiger_concurrent_write_transactions_usage | available, used | transactions |\n| mongodb.wiredtiger_cache_usage | used | bytes |\n| mongodb.wiredtiger_cache_dirty_space_size | dirty | bytes |\n| mongodb.wiredtiger_cache_io_rate | read, written | pages/s |\n| 
mongodb.wiredtiger_cache_evictions_rate | unmodified, modified | pages/s |\n| mongodb.sharding_nodes_count | shard_aware, shard_unaware | nodes |\n| mongodb.sharding_sharded_databases_count | partitioned, unpartitioned | databases |\n| mongodb.sharding_sharded_collections_count | partitioned, unpartitioned | collections |\n\n### Per lock type\n\nThese metrics refer to the lock type.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| lock_type | lock type (e.g. global, database, collection, mutex) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.lock_acquisitions_rate | shared, exclusive, intent_shared, intent_exclusive | acquisitions/s |\n\n### Per commit type\n\nThese metrics refer to the commit type.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| commit_type | commit type (e.g. noShards, singleShard, singleWriteShard) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.transactions_commits_rate | success, fail | commits/s |\n| mongodb.transactions_commits_duration_time | commits | milliseconds |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| database | database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.database_collection_count | collections | collections |\n| mongodb.database_indexes_count | indexes | indexes |\n| mongodb.database_views_count | views | views |\n| mongodb.database_documents_count | documents | documents |\n| mongodb.database_data_size | data_size | bytes |\n| mongodb.database_storage_size | storage_size | bytes |\n| mongodb.database_index_size | index_size | bytes |\n\n### Per replica set member\n\nThese metrics refer to the replica set member.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| repl_set_member | replica set member name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.repl_set_member_state | primary, startup, secondary, recovering, startup2, unknown, arbiter, down, rollback, removed | state |\n| mongodb.repl_set_member_health_status | up, down | status |\n| mongodb.repl_set_member_replication_lag_time | replication_lag | milliseconds |\n| mongodb.repl_set_member_heartbeat_latency_time | heartbeat_latency | milliseconds |\n| mongodb.repl_set_member_ping_rtt_time | ping_rtt | milliseconds |\n| mongodb.repl_set_member_uptime | uptime | seconds |\n\n### Per shard\n\nThese metrics refer to the shard.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| shard_id | shard id |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.sharding_shard_chunks_count | chunks | chunks |\n\n",integration_type:"collector",id:"go.d.plugin-mongodb-MongoDB",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/mongodb/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-monit",plugin_name:"go.d.plugin",module_name:"monit",monitored_instance:{name:"Monit",link:"https://mmonit.com/monit/",categories:["data-collection.synthetic-testing"],icon_filename:"monit.png"},related_resources:{integrations:{list:[]}},alternative_monitored_instances:[],info_provided_to_referring_integrations:{description:""},keywords:["monit","mmonit","supervision tool","monitrc"]},overview:"# Monit\n\nPlugin: go.d.plugin\nModule: 
monit\n\n## Overview\n\nThis collector monitors the status of Monit's service checks.\n\n\nIt sends HTTP requests to the Monit `/_status?format=xml&level=full` endpoint.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Monit instances running on localhost that are listening on port 2812.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:2812\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **monit** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **monit**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/monit.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable TCP PORT\n\nThe collector reads status from the embedded HTTP interface of Monit, which must be enabled in `monitrc`. See [Syntax for TCP port](https://mmonit.com/monit/documentation/monit.html#TCP-PORT) for details.\n
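\nFor reference, a minimal `monitrc` snippet that enables the interface with the defaults this collector expects (adjust address, port, and credentials to your environment, then run `monit reload`):\n\n```text\nset httpd port 2812 and\n    use address localhost  # accept connections only from localhost\n    allow localhost        # let localhost query the interface\n    allow admin:monit      # username:password pair\n```\n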
\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:2812 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. | admin | no |\n|  | password | Password for Basic HTTP authentication. | monit | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **monit** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the monit data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _monit_ (or scroll the list) to locate the **monit** collector.\n5. Click the **+** next to the **monit** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/monit.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/monit.conf\n```\n\n##### Examples\n\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:2812\n    username: admin\n    password: monit\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nMonit serving HTTPS with a self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:2812\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:2812\n\n  - name: remote\n    url: http://192.0.2.1:2812\n\n```\n{% /details %}\n
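\nTo quickly check connectivity and credentials outside Netdata, query the same endpoint the collector uses (defaults shown):\n\n```bash\ncurl -u admin:monit "http://127.0.0.1:2812/_status?format=xml&level=full"\n```\n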
',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `monit` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m monit\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m monit -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `monit` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep monit\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep monit /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep monit\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per service\n\nThese metrics refer to the monitored service.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| server_hostname | Hostname of the Monit server. |\n| service_check_name | Service check name. |\n| service_check_type | Service check type. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| monit.service_check_status | ok, error, initializing, not_monitored | status |\n\n",integration_type:"collector",id:"go.d.plugin-monit-Monit",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/monit/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-mssql",plugin_name:"go.d.plugin",module_name:"mssql",monitored_instance:{name:"Microsoft SQL Server",link:"https://www.microsoft.com/en-us/sql-server",categories:["data-collection.databases"],icon_filename:"mssql.svg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Containers"}]}},info_provided_to_referring_integrations:{description:""},keywords:["db","database","mssql","sql server","microsoft"]},overview:'# Microsoft SQL Server\n\nPlugin: go.d.plugin\nModule: mssql\n\n## Overview\n\nThis collector monitors the health and performance of Microsoft SQL Server instances.\n\nIt collects metrics from:\n- Performance counters (buffer manager, memory manager, SQL statistics)\n- Dynamic management views (DMVs) for wait statistics, locks, and sessions\n- Per-database transaction and lock statistics\n- SQL Server Agent job status\n\n\nIt connects to the SQL Server instance via TCP using the go-mssqldb driver and executes queries against:\n\n- `sys.dm_os_performance_counters` - Performance counter values\n- `sys.dm_exec_sessions` - Connection information\n- `sys.dm_os_wait_stats` - Wait statistics\n- `sys.dm_tran_locks` - Lock information\n- `sys.dm_io_virtual_file_stats` - I/O stall (latency) statistics\n- `sys.dm_os_process_memory` - SQL Server process memory\n- `sys.dm_os_sys_memory` - OS physical memory and page file\n- `sys.master_files` - Database file sizes\n- `msdb.dbo.sysjobs` - SQL Agent job status\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe monitoring user requires the VIEW SERVER STATE permission to access DMVs.\nSQL Agent job monitoring is part of collector startup, so access to\n`msdb.dbo.sysjobs` is required.\n\n\nMicrosoft SQL Server can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Containers" %}Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it tries to connect to SQL Server on localhost:1433 without authentication.\nYou must configure proper credentials for monitoring.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe collector executes lightweight queries against system views.\nMost queries complete in milliseconds and have minimal impact on server performance.\n\n',setup:'## Setup\n\n\nYou can configure the **mssql** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 
|\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **mssql**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/mssql.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Create monitoring user\n\nCreate a SQL Server login with VIEW SERVER STATE permission:\n\n```sql\n-- Create login\nCREATE LOGIN netdata_user WITH PASSWORD = \'YourStrongPassword!\';\n\n-- Grant VIEW SERVER STATE (required for DMVs)\nGRANT VIEW SERVER STATE TO netdata_user;\n\n-- Grant access to msdb for SQL Agent job monitoring (required)\nUSE msdb;\nCREATE USER netdata_user FOR LOGIN netdata_user;\nGRANT SELECT ON dbo.sysjobs TO netdata_user;\n\n-- Optional: Grant access to distribution database for replication monitoring\n-- (only if replication is configured)\nUSE distribution;\nCREATE USER netdata_user FOR LOGIN netdata_user;\nGRANT SELECT ON dbo.MSreplication_monitordata TO netdata_user;\nGRANT SELECT ON dbo.MSpublications TO netdata_user;\nGRANT SELECT ON dbo.MSsubscriptions TO netdata_user;\n```\n\n**Required permissions:**\n- `VIEW SERVER STATE` - Access to dynamic management views\n- `SELECT on msdb.dbo.sysjobs` - SQL Agent job status monitoring\n\n**Optional permissions:**\n- `SELECT on distribution.dbo.MSreplication_monitordata` - Replication monitoring\n- `SELECT on distribution.dbo.MSpublications` - Publication information\n- `SELECT on distribution.dbo.MSsubscriptions` - Subscription counts\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | dsn | SQL Server DSN (Data Source Name). See [DSN syntax](https://github.com/microsoft/go-mssqldb#connection-parameters-and-dsn). | sqlserver://localhost:1433 | yes |\n|  | timeout | Query timeout (seconds). | 5 | no |\n| **Functions** | functions.top_queries.disabled | Disable the [top-queries](#top-queries) function. | no | no |\n|  | functions.top_queries.timeout | Query timeout for top-queries function (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.top_queries.limit | Maximum number of queries to return in the top-queries response. | 500 | no |\n|  | functions.top_queries.time_window_days | Number of days of Query Store data to analyze. Set to 0 to include all available data. Smaller values improve query performance but show less history. | 7 | no |\n|  | functions.deadlock_info.disabled | Disable the [deadlock-info](#deadlock-info) function. | no | no |\n|  | functions.deadlock_info.timeout | Query timeout for deadlock-info function (seconds). 
Uses collector timeout if not set. |  | no |\n|  | functions.deadlock_info.use_ring_buffer | Use ring_buffer instead of event_file for the system_health session.<br/><br/>WARNING: Not recommended for production:<br/>\u2022 Data cleared on failover/restart<br/>\u2022 4 MB capacity limit<br/>\u2022 High CPU load during queries<br/><br/>Use only for Azure SQL Database without Blob Storage, or for testing. | no | no |\n|  | functions.error_info.disabled | Disable the [error-info](#error-info) function. | no | no |\n|  | functions.error_info.timeout | Query timeout for error-info function (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.error_info.session_name | Extended Events session name capturing error_reported events.<br/>Must be created by an administrator with an event_file (recommended) or ring_buffer target. | netdata_errors | no |\n|  | functions.error_info.use_ring_buffer | Use ring_buffer instead of event_file for error events.<br/><br/>WARNING: Not recommended for production:<br/>\u2022 Data cleared on failover/restart<br/>\u2022 4 MB capacity limit<br/>\u2022 High CPU load during queries<br/><br/>Use only for Azure SQL Database without Blob Storage, or for testing. | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **mssql** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the mssql data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _mssql_ (or scroll the list) to locate the **mssql** collector.\n5. Click the **+** next to the **mssql** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/mssql.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mssql.conf\n```\n\n##### Examples\n\n###### Basic configuration\n\nConnect to local SQL Server with SQL authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    dsn: "sqlserver://netdata_user:password@localhost:1433"\n\n```\n{% /details %}\n###### Windows Authentication\n\nConnect using Windows integrated authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    dsn: "sqlserver://localhost:1433?trusted_connection=yes"\n\n```\n{% /details %}\n###### Named instance\n\nConnect to a named SQL Server instance.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: named_instance\n    dsn: "sqlserver://netdata_user:password@localhost/INSTANCENAME"\n\n```\n{% /details %}\n###### Remote server\n\nConnect to a remote SQL Server.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: remote\n    dsn: "sqlserver://netdata_user:password@192.168.1.100:1433"\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMonitoring multiple SQL Server instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: production\n    dsn: "sqlserver://netdata_user:password@prod-sql:1433"\n\n  - name: development\n    dsn: "sqlserver://netdata_user:password@dev-sql:1433"\n\n```\n{% /details %}\n###### With custom function settings\n\nConfigure function-specific settings like timeouts and limits.\n\n> **Warning**: Query Store may contain unmasked literal values (PII).\n> Disable functions if not needed or ensure proper access controls.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    dsn: "sqlserver://netdata_user:password@localhost:1433"\n    functions:\n      top_queries:\n        limit: 100\n        time_window_days: 7\n      deadlock_info:\n        use_ring_buffer: true\n      error_info:\n        session_name: custom_errors\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `mssql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m mssql\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m mssql -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `mssql` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep mssql\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep mssql /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep mssql\n```\n\n### Connection refused\n\nEnsure SQL Server is running and accepting TCP connections on the configured port.\nCheck that the SQL Server Browser service is running if using named instances.\n\n\n### Login failed\n\nVerify the username and password in the DSN are correct.\nEnsure SQL Server is configured for mixed mode authentication if using SQL logins.\n\n\n### Permission denied\n\nThe monitoring user needs VIEW SERVER STATE permission.\nGrant it with: `GRANT VIEW SERVER STATE TO netdata_user;`\n\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",functions:"## Functions\n\nThis collector exposes real-time functions for interactive troubleshooting in the Live tab.\n\n\n### Top Queries\n\nRetrieves aggregated SQL query performance metrics from Microsoft SQL Server [Query Store](https://learn.microsoft.com/en-us/sql/relational-databases/performance/monitoring-performance-by-using-the-query-store) runtime statistics.\n\nThis function queries `sys.query_store_runtime_stats` and related views across all databases with Query Store enabled, aggregating execution statistics by query hash. It provides comprehensive timing, I/O, memory, and parallelism metrics.\n\nUse cases:\n- Identify slow or resource-intensive queries consuming excessive CPU time or memory\n- Analyze I/O patterns (logical reads, physical reads, writes) to detect bottlenecks\n- Monitor parallelism (DOP) and tempdb usage for capacity planning\n\nQuery text is truncated at 4096 characters for display purposes. Columns are dynamically detected based on SQL Server version (some metrics only available in 2016+/2017+).\n
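\nTo get an intuition for what the function computes, here is a simplified sketch of this kind of Query Store aggregation as plain T-SQL (illustrative only; the collector's actual query is more elaborate and version-aware):\n\n```sql\n-- Top query patterns by cumulative execution time, per database.\n-- avg_duration is in microseconds, so the total is converted to milliseconds.\nSELECT TOP 10\n    q.query_hash,\n    SUM(rs.count_executions) AS calls,\n    SUM(rs.avg_duration * rs.count_executions) / 1000.0 AS total_time_ms\nFROM sys.query_store_runtime_stats AS rs\nJOIN sys.query_store_plan AS p ON p.plan_id = rs.plan_id\nJOIN sys.query_store_query AS q ON q.query_id = p.query_id\nGROUP BY q.query_hash\nORDER BY total_time_ms DESC;\n```\n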
\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Mssql:top-queries` |\n| Require Cloud | yes |\n| Performance | Executes dynamic SQL to aggregate Query Store data across all enabled databases:<br/>\u2022 Execution time depends on Query Store workload and number of monitored databases<br/>\u2022 Default limit of 500 rows balances completeness with performance |\n| Security | Query text may contain unmasked literal values including potentially sensitive data:<br/>\u2022 Personal information in WHERE clauses or INSERT values<br/>\u2022 Business data and internal identifiers<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to SQL Server<br/>\u2022 Query Store is enabled on at least one user database<br/>\u2022 Returns HTTP 503 if collector is still initializing<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Enable Query Store\n\nQuery Store must be enabled on each database you want to monitor.\n\n1. Verify Query Store is enabled on your databases:\n\n   ```sql\n   SELECT name, is_query_store_on\n   FROM sys.databases\n   WHERE name NOT IN ('master', 'tempdb', 'model', 'msdb');\n   ```\n\n2. Enable Query Store on databases where it is disabled:\n\n   ```sql\n   ALTER DATABASE [YourDatabaseName] SET QUERY_STORE = ON;\n   ```\n\n3. Make sure the function is enabled in the Netdata collector config (it is enabled by default; it is controlled by `functions.top_queries.disabled`):\n\n   ```yaml\n   jobs:\n     - name: local\n       dsn: \"sqlserver://user:pass@localhost:1433\"\n       functions:\n         top_queries:\n           disabled: no\n   ```\n\n:::info\n\n- Query Store is available in SQL Server 2016+ and Azure SQL Database\n- Requires ALTER DATABASE permission to enable Query Store\n- System databases (master, tempdb, model, msdb) are excluded from queries\n\n:::\n\n\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. The available options depend on your SQL Server version and include metrics like total execution time, number of calls, CPU time, logical I/O, memory grants, and more. Defaults to Total Time to focus on the most resource-intensive queries. | yes | totalTime |  |\n\n#### Returns\n\nAggregated query execution statistics from Query Store runtime views, providing comprehensive performance analysis across all monitored databases. Each row represents a unique query pattern (normalized query hash) with cumulative metrics across all its executions.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Query Hash | string |  | hidden | Unique hash identifier for the normalized query pattern. Queries with identical structure but different literal values share the same digest. |\n| Query | string |  |  | The SQL query text with literal values truncated at 4096 characters. Use this to identify the actual SQL being executed and spot parameterized queries or injection risks. |\n| Database | string |  |  | Database name where the query was executed. Essential for multi-database analysis to identify which database is experiencing query load. |\n| Calls | integer |  |  | Total number of times this query pattern has been executed. 
High values indicate frequently run queries that may impact server performance significantly. |\n| Error Attribution | string |  |  | Status of error detail attribution for this query. Values: enabled, no_data, not_enabled, not_supported. |\n| Error Number | integer |  |  | Most recent error number observed for this query (when error attribution is enabled). |\n| Error State | integer |  | hidden | SQL Server error state for the most recent error (when error attribution is enabled). |\n| Error Message | string |  |  | Most recent error message for this query (when error attribution is enabled). |\n| Hash Match Joins | integer |  |  | Count of Hash Match join operators across all stored plans for this query. |\n| Merge Joins | integer |  |  | Count of Merge Join operators across all stored plans for this query. |\n| Nested Loops | integer |  |  | Count of Nested Loops operators across all stored plans for this query. |\n| Sorts | integer |  |  | Count of Sort operators across all stored plans for this query. |\n| Total Time | duration | milliseconds |  | Cumulative execution time across all query executions. This is a key metric for identifying the most resource-intensive queries in terms of total server time consumption. |\n| Avg Time | duration | milliseconds |  | Average execution time per query run, calculated as weighted average when execution count is greater than zero. Compare with Total Time to determine if individual executions or high frequency drives resource usage. |\n| Last Time | duration | milliseconds | hidden | Execution time of the most recent execution for this query pattern. Useful for identifying recent performance changes or individual outlier executions. |\n| Min Time | duration | milliseconds | hidden | Minimum execution time observed. Helps identify variability in query performance and spot potential optimization opportunities for outliers. |\n| Max Time | duration | milliseconds | hidden | Maximum execution time observed. Large gaps between Min Time and Max Time may indicate performance instability due to parameter sniffing, data skew, or lock contention. |\n| StdDev Time | duration | milliseconds | hidden | Standard deviation of execution time. High values indicate inconsistent query performance, making capacity planning difficult and suggesting need for query optimization or consistent indexing. |\n| Avg CPU | duration | milliseconds |  | Average CPU time consumed per query execution. High values indicate CPU-intensive operations that may include complex calculations, string manipulations, or excessive function calls. Available in SQL Server 2016+. |\n| Last CPU | duration | milliseconds | hidden | CPU time of the most recent execution. Useful for identifying recent changes in query patterns and resource usage. |\n| Min CPU | duration | milliseconds | hidden | Minimum CPU time observed. Helps identify variability in CPU consumption and spot efficient vs. inefficient query executions. |\n| Max CPU | duration | milliseconds | hidden | Maximum CPU time observed. Spikes may indicate complex queries, large result sets, or parallelism issues. |\n| StdDev CPU | duration | milliseconds | hidden | Standard deviation of CPU time. High variability suggests inconsistent performance due to varying data volumes, plan cache hit rates, or changing execution contexts. |\n| Avg Logical Reads | float |  |  | Average number of logical read operations (8KB pages) per execution. High values indicate queries scanning large amounts of data through indexes or table scans. 
Monitor for I/O subsystem impact. |\n| Last Logical Reads | integer |  | hidden | Logical reads from the most recent execution. Useful for identifying immediate query patterns and recent performance changes. |\n| Min Logical Reads | integer |  | hidden | Minimum logical reads observed. Helps identify data access patterns and spot outliers. |\n| Max Logical Reads | integer |  | hidden | Maximum logical reads observed. Very high values may indicate full table scans, missing indexes, or inefficient join operations requiring excessive data access. |\n| StdDev Logical Reads | float |  | hidden | Standard deviation of logical reads. High variability suggests inconsistent access patterns, potentially indicating performance issues with certain queries or data volumes. |\n| Avg Logical Writes | float |  |  | Average number of logical write operations per execution. High values indicate heavy write workloads that may benefit from batching or optimization. |\n| Last Logical Writes | integer |  | hidden | Logical writes from the most recent execution. Helps track recent write activity and identify immediate performance impact. |\n| Min Logical Writes | integer |  | hidden | Minimum logical writes observed. Helps identify read-heavy vs. write-heavy query patterns and data access characteristics. |\n| Max Logical Writes | integer |  | hidden | Maximum logical writes observed. Spikes may indicate bulk insert/update operations, large transactions, or data migration activities. |\n| StdDev Logical Writes | float |  | hidden | Standard deviation of logical writes. High values indicate write performance variability, potentially suggesting inconsistent transaction sizes or periodic bulk operations. |\n| Avg Physical Reads | float |  |  | Average number of physical read operations from storage per execution. High values indicate queries requiring substantial disk I/O for data retrieval, potentially due to full table scans or missing covering indexes. |\n| Last Physical Reads | integer |  | hidden | Physical reads from the most recent execution. Useful for identifying immediate I/O patterns and recent storage subsystem pressure. |\n| Min Physical Reads | integer |  | hidden | Minimum physical reads observed. Helps baseline I/O patterns and identify read-intensive query scenarios. |\n| Max Physical Reads | integer |  | hidden | Maximum physical reads observed. Extremely high values may indicate storage subsystem bottlenecks, full table scans without covering indexes, or queries processing very large data volumes. |\n| StdDev Physical Reads | float |  | hidden | Standard deviation of physical reads. High variability suggests inconsistent disk access patterns, potentially indicating intermittent I/O performance issues or storage contention. |\n| Avg CLR Time | duration | milliseconds |  | Average CLR (Common Language Runtime) time per execution. High values indicate managed code (stored procedures, functions, triggers) with heavy computations, garbage collection pressure, or inefficient memory allocations. Available in SQL Server 2016+. |\n| Last CLR Time | duration | milliseconds | hidden | CLR time of the most recent execution. Useful for identifying recent managed code performance changes and detecting inefficient code deployments. |\n| Min CLR Time | duration | milliseconds | hidden | Minimum CLR time observed. Helps identify efficient managed code executions and spot expensive CLR operations. |\n| Max CLR Time | duration | milliseconds | hidden | Maximum CLR time observed. 
Spikes may indicate complex managed code operations, large object allocations, or expensive .NET framework method calls. |\n| StdDev CLR Time | duration | milliseconds | hidden | Standard deviation of CLR time. High variability suggests inconsistent managed code execution patterns, potentially varying by execution parameters, data volumes, or different code paths being taken. |\n| Avg DOP | float |  |  | Average Degree of Parallelism (DOP) per query. Higher values indicate queries utilizing more CPU cores through parallelism, potentially consuming significant server resources. Values above 1 indicate intra-query parallelism; values of 1 indicate serial execution. |\n| Last DOP | integer |  | hidden | DOP of the most recent execution. Helps track recent parallelism patterns and identify changes in query execution behavior. |\n| Min DOP | integer |  | hidden | Minimum DOP observed. Values of 0 may indicate serial execution; values above 1 suggest parallel query execution within individual queries. |\n| Max DOP | integer |  | hidden | Maximum DOP observed. Very high values (>4) may indicate aggressive parallelism consuming excessive resources and potentially affecting concurrent workloads. Available in SQL Server 2016+. |\n| StdDev DOP | float |  | hidden | Standard deviation of DOP. High variability suggests inconsistent parallelism patterns across executions, potentially indicating performance variability based on data characteristics or query complexity. |\n| Avg Memory (8KB pages) | float |  |  | Average memory grant (in 8KB pages) per execution. High values indicate memory-intensive queries that may benefit from index optimization, reduced result sets, or query tuning to reduce working memory usage. |\n| Last Memory (8KB pages) | integer |  | hidden | Memory grant from the most recent execution. Useful for identifying recent memory pressure and tracking immediate impact of resource-intensive queries. |\n| Min Memory (8KB pages) | integer |  | hidden | Minimum memory grant observed. Helps identify memory-efficient queries and baseline memory requirements for common operations. |\n| Max Memory (8KB pages) | integer |  | hidden | Maximum memory grant observed. Spikes may indicate queries with large sort operations, hash joins, temporary table creation, or excessive parameter lengths consuming working memory. |\n| StdDev Memory | float |  | hidden | Standard deviation of memory grants. High variability suggests inconsistent memory usage patterns, potentially varying by execution parameters, result set sizes, or different code paths being executed. |\n| Avg Rows | float |  |  | Average number of rows processed per query execution. High values indicate queries returning large result sets that may consume significant network bandwidth, memory for result buffers, and client application resources. |\n| Last Rows | integer |  | hidden | Row count from the most recent execution. Helps identify recent query patterns and track immediate data processing requirements. |\n| Min Rows | integer |  | hidden | Minimum rows observed. Helps identify data access patterns and spot outliers in result set sizes. |\n| Max Rows | integer |  | hidden | Maximum rows observed. Extremely high values may indicate full table scans without WHERE clauses, missing or inefficient filters, or data export operations. |\n| StdDev Rows | float |  | hidden | Standard deviation of rows processed. 
High variability suggests inconsistent result set sizes, potentially due to varying query filters, parameterized inputs, or different data distributions across executions. |\n| Avg Log Bytes | float |  |  | Average transaction log bytes written per query execution (SQL Server 2017+). High values indicate write-intensive operations (INSERT/UPDATE/DELETE), large transactions, or bulk modifications. This measures WAL activity, not diagnostic logging. |\n| Last Log Bytes | integer |  | hidden | Transaction log bytes from the most recent execution. Useful for tracking recent write activity. |\n| Min Log Bytes | integer |  | hidden | Minimum transaction log bytes observed. Helps identify write-efficient queries and baseline requirements. |\n| Max Log Bytes | integer |  | hidden | Maximum transaction log bytes observed. Spikes may indicate bulk operations, large transactions, or queries affecting many rows. |\n| StdDev Log Bytes | float |  | hidden | Standard deviation of transaction log bytes. High variability suggests inconsistent write patterns, potentially varying by the number of rows affected or transaction sizes. |\n| Avg TempDB (8KB pages) | float |  |  | Average tempdb space usage (in 8KB pages) per execution. High values indicate queries that create or use large temporary objects, work tables, sort operations, or have heavy tempdb spillage from disk. High tempdb usage can lead to disk I/O contention and overall performance degradation. |\n| Last TempDB (8KB pages) | integer |  | hidden | Tempdb space from the most recent execution. Useful for identifying recent tempdb pressure and tracking immediate disk I/O impact of resource-intensive queries. |\n| Min TempDB (8KB pages) | integer |  | hidden | Minimum tempdb space observed. Helps identify tempdb-efficient queries and baseline temporary object requirements for common operations. |\n| Max TempDB (8KB pages) | integer |  | hidden | Maximum tempdb space observed. Spikes may indicate queries with large sort operations, hash joins, index spool usage, or temporary table creation consuming substantial tempdb space. Can lead to tempdb autogrow and disk space issues. |\n| StdDev TempDB | float |  | hidden | Standard deviation of tempdb space usage. High variability suggests inconsistent temporary object usage patterns, potentially varying by query complexity, parameter types, or different data access patterns affecting temporary object creation. 
|\n\n### Deadlock Info\n\nRetrieves the most recent deadlock event from SQL Server's `system_health` Extended Events session (`xml_deadlock_report`).\n\nThe deadlock graph XML is parsed to attribute the deadlock to the participating processes and their query text, lock mode, lock status, and wait resource.\n\nUse cases:\n- Identify which process was chosen as the deadlock victim\n- Inspect the waiting resource and lock mode involved in the deadlock\n- Correlate deadlocks with recent application changes or deployments\n\nQuery text and wait resource strings are truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Mssql:deadlock-info` |\n| Require Cloud | yes |\n| Performance | Executes on-demand queries against the `system_health` ring buffer:<br/>\u2022 Not part of regular metric collection<br/>\u2022 Overhead is limited to function execution time and XML parsing |\n| Security | Query text and wait resource strings may include unmasked literal values including sensitive data (PII/secrets):<br/>\u2022 SQL literals such as emails, IDs, or tokens<br/>\u2022 Schema and table names that may be sensitive in some environments<br/>\u2022 Restrict dashboard access to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to SQL Server<br/>\u2022 `deadlock_info_function_enabled` is true<br/>\u2022 The account has `VIEW SERVER STATE` permission<br/>\u2022 Returns HTTP 200 with empty data when no deadlock is found<br/>\u2022 Returns HTTP 403 when permission is missing<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 561 when the deadlock graph cannot be parsed<br/>\u2022 Returns HTTP 503 if the collector is still initializing or the function is disabled<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\nNo additional configuration is required.\n\n#### Parameters\n\nThis function has no parameters.\n\n#### Returns\n\nParsed deadlock participants from the latest detected deadlock event. Each row represents one process involved in the deadlock.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Row ID | string |  | hidden | Unique row identifier composed of deadlock ID and process ID. |\n| Deadlock ID | string |  |  | Identifier for the deadlock event, derived from the deadlock timestamp to group participating processes. |\n| Timestamp | timestamp |  |  | Timestamp of the deadlock event from Extended Events when available; otherwise the function execution time. |\n| Process ID | string |  |  | Deadlock graph process identifier for the process involved in the deadlock. |\n| SPID | integer |  |  | SQL Server session ID (SPID) for the process when available. |\n| ECID | integer |  |  | Execution context ID (ECID) for parallel execution contexts when available. |\n| Victim | string |  |  | \"true\" when the process was chosen as the deadlock victim and rolled back; otherwise \"false\". |\n| Query | string |  |  | SQL query text for the process involved in the deadlock. Truncated to 4096 characters. |\n| Lock Mode | string |  |  | Lock mode reported for the process within the deadlock graph (for example X or S). |\n| Lock Status | string |  |  | Lock status for the process. WAITING indicates the process was waiting on a lock. |\n| Wait Resource | string |  |  | Lock resource identifier from the deadlock graph showing what the process was waiting on. 
|\n| Database | string |  |  | Database name mapped from the deadlock graph database ID when available. |\n\n### Error Info\n\nRetrieves recent SQL errors from a user-managed Extended Events session that captures `sqlserver.error_reported`\nwith both the `sql_text` and `query_hash` actions.\n\nThe session must be created by an administrator and include an `event_file` target. Netdata reads the event file\nand returns recent error events with error number, message, and SQL text. The `query_hash` action is required for\nreliable mapping into `top-queries` (query text fallback is best-effort).\n\nUse cases:\n- Identify recent query errors and their messages\n- Correlate errors to query text\n- Validate error rates seen in top-queries\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Mssql:error-info` |\n| Require Cloud | yes |\n| Performance | Executes on-demand queries against the configured Extended Events event file:<br/>\u2022 Not part of regular metric collection<br/>\u2022 Overhead is limited to function execution time |\n| Security | Error messages and query text may include unmasked literal values including sensitive data (PII/secrets):<br/>\u2022 Restrict dashboard access to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to SQL Server<br/>\u2022 `error_info_function_enabled` is true<br/>\u2022 The Extended Events session exists and has an event_file target<br/>\u2022 The account has `VIEW SERVER STATE` permission<br/>\u2022 Returns HTTP 200 with empty data when no errors are found<br/>\u2022 Returns HTTP 403 when permission is missing<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 503 if the session is not enabled or the function is disabled<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Create Extended Events session for error capture\n\nCreate an Extended Events session that captures `sqlserver.error_reported` with `sql_text` and `query_hash` actions:\n\n```sql\n-- Create the Extended Events session with event_file target\nCREATE EVENT SESSION [netdata_errors] ON SERVER\nADD EVENT sqlserver.error_reported(\n  ACTION(sqlserver.sql_text, sqlserver.query_hash)\n)\nADD TARGET package0.event_file(SET filename=N'netdata_errors');\nGO\n\n-- Start the session\nALTER EVENT SESSION [netdata_errors] ON SERVER STATE = START;\nGO\n\n-- Grant required permission\nGRANT VIEW SERVER STATE TO [netdata_user];\n```\n\nIf you use a different session name, set it in the collector config:\n\n```yaml\njobs:\n  - name: local\n    dsn: \"sqlserver://user:pass@localhost:1433\"\n    error_info_session_name: your_session_name\n```\n\n\n\n#### Parameters\n\nThis function has no parameters.\n\n#### Returns\n\nRecent error events from the configured Extended Events session.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Timestamp | timestamp |  |  | Timestamp of the error event. |\n| Error Number | integer |  |  | SQL Server error number. |\n| Error State | integer |  |  | SQL Server error state. |\n| Error Message | string |  |  | Error message text. |\n| Query | string |  |  | SQL text captured with the error event. |\n| Query Hash | string |  | hidden | Query hash captured with the error event (used for mapping into top-queries). |\n\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Microsoft SQL Server instance\n\nThese metrics refer to the entire SQL Server instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | SQL Server 2016+ | Azure SQL Database |\n|:------|:----------|:----|:---:|:---:|\n| mssql.user_connections | user | connections | \u2022 | \u2022 |\n| mssql.session_connections | user, internal | connections | \u2022 | \u2022 |\n| mssql.blocked_processes | blocked | processes | \u2022 | \u2022 |\n| mssql.batch_requests | batch | requests/s | \u2022 | \u2022 |\n| mssql.compilations | compilations | compilations/s | \u2022 | \u2022 |\n| mssql.recompilations | recompilations | recompilations/s | \u2022 | \u2022 |\n| mssql.auto_param_attempts | total, safe, failed | attempts/s | \u2022 | \u2022 |\n| mssql.sql_errors | errors | errors/s | \u2022 | \u2022 |\n| mssql.buffer_cache_hit_ratio | hit_ratio | percentage | \u2022 | \u2022 |\n| mssql.buffer_page_life_expectancy | life_expectancy | seconds | \u2022 | \u2022 |\n| mssql.buffer_page_iops | read, written | pages/s | \u2022 | \u2022 |\n| mssql.buffer_checkpoint_pages | flushed | pages/s | \u2022 | \u2022 |\n| mssql.buffer_page_lookups | lookups | lookups/s | \u2022 | \u2022 |\n| mssql.buffer_lazy_writes | lazy_writes | writes/s | \u2022 | \u2022 |\n| mssql.memory_total | memory | bytes | \u2022 | \u2022 |\n| mssql.memory_connection | memory | bytes | \u2022 | \u2022 |\n| mssql.memory_pending_grants | pending | processes | \u2022 | \u2022 |\n| mssql.memory_external_benefit | benefit | benefit | \u2022 | \u2022 |\n| mssql.page_splits | page | splits/s | \u2022 | \u2022 |\n| mssql.process_memory_resident | resident | bytes | \u2022 | \u2022 |\n| mssql.process_memory_virtual | virtual | bytes | \u2022 | \u2022 |\n| mssql.process_memory_utilization | utilization | percentage | \u2022 | \u2022 |\n| mssql.process_page_faults | page_faults | faults | \u2022 | \u2022 |\n| mssql.os_memory | used, available | bytes | \u2022 | \u2022 |\n| mssql.os_pagefile | used, available | bytes | \u2022 | \u2022 |\n\n### Per database\n\nThese metrics refer to individual databases.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| database | Database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | SQL Server 2016+ | Azure SQL Database |\n|:------|:----------|:----|:---:|:---:|\n| mssql.database_active_transactions | active | transactions | \u2022 | \u2022 |\n| mssql.database_transactions | transactions | transactions/s | \u2022 | \u2022 |\n| mssql.database_write_transactions | write | transactions/s | \u2022 | \u2022 |\n| mssql.database_log_flushes | flushes | flushes/s | \u2022 | \u2022 |\n| mssql.database_log_flushed | flushed | bytes/s | \u2022 | \u2022 |\n| mssql.database_log_growths | growths | growths | \u2022 | \u2022 |\n| mssql.database_io_stall | read, write | ms | \u2022 | \u2022 |\n| mssql.database_data_file_size | size | bytes | \u2022 | \u2022 |\n| mssql.database_backup_restore_throughput | throughput | bytes/s | \u2022 | \u2022 |\n| mssql.database_state | online, restoring, recovering, pending, suspect, emergency, offline | state | \u2022 | \u2022 |\n| mssql.database_read_only | read_only, read_write | status | \u2022 | \u2022 |\n\n### Per lock stats\n\nThese metrics refer to lock statistics by lock resource type (from performance counters).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| resource | Lock resource type (Database, File, Object, Page, Key, 
Extent, RID, HoBT, etc.) |\n\nMetrics:\n\n| Metric | Dimensions | Unit | SQL Server 2016+ | Azure SQL Database |\n|:------|:----------|:----|:---:|:---:|\n| mssql.lock_stats_deadlocks | deadlocks | deadlocks/s | \u2022 | \u2022 |\n| mssql.lock_stats_waits | waits | waits/s | \u2022 | \u2022 |\n| mssql.lock_stats_timeouts | timeouts | timeouts/s | \u2022 | \u2022 |\n| mssql.lock_stats_requests | requests | requests/s | \u2022 | \u2022 |\n\n### Per lock resource\n\nThese metrics refer to lock resource types (from sys.dm_tran_locks).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| resource | Lock resource type (Database, File, Object, Page, Key, etc.) |\n\nMetrics:\n\n| Metric | Dimensions | Unit | SQL Server 2016+ | Azure SQL Database |\n|:------|:----------|:----|:---:|:---:|\n| mssql.locks_by_resource | locks | locks | \u2022 | \u2022 |\n\n### Per wait type\n\nThese metrics refer to individual wait types (from sys.dm_os_wait_stats).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| wait_type | Wait type name |\n| wait_category | Wait category (CPU, Lock, Latch, Buffer IO, etc.) |\n\nMetrics:\n\n| Metric | Dimensions | Unit | SQL Server 2016+ | Azure SQL Database |\n|:------|:----------|:----|:---:|:---:|\n| mssql.wait_total_time | duration | ms | \u2022 | \u2022 |\n| mssql.wait_resource_time | duration | ms | \u2022 | \u2022 |\n| mssql.wait_signal_time | duration | ms | \u2022 | \u2022 |\n| mssql.wait_max_time | max_time | ms | \u2022 | \u2022 |\n| mssql.wait_count | waits | waits/s | \u2022 | \u2022 |\n\n### Per job\n\nThese metrics refer to SQL Server Agent jobs.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| job_name | Job name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | SQL Server 2016+ | Azure SQL Database |\n|:------|:----------|:----|:---:|:---:|\n| mssql.job_status | enabled, disabled | status | \u2022 | \u2022 |\n\n### Per replication\n\nThese metrics refer to SQL Server replication publications.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| publisher_db | Publisher database name |\n| publication | Publication name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | SQL Server 2016+ | Azure SQL Database |\n|:------|:----------|:----|:---:|:---:|\n| mssql.replication_status | started, succeeded, in_progress, idle, retrying, failed | status | \u2022 | \u2022 |\n| mssql.replication_warning | expiration, latency, merge_expiration, merge_slow_duration, merge_fast_duration, merge_fast_speed, merge_slow_speed | flags | \u2022 | \u2022 |\n| mssql.replication_latency | average, best, worst | seconds | \u2022 | \u2022 |\n| mssql.replication_subscriptions | total, agents_running | subscriptions | \u2022 | \u2022 |\n\n",integration_type:"collector",id:"go.d.plugin-mssql-Microsoft_SQL_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/mssql/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-mariadb",plugin_name:"go.d.plugin",module_name:"mysql",monitored_instance:{name:"MariaDB",link:"https://mariadb.org/",icon_filename:"mariadb.svg",categories:["data-collection.databases"]},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Containers"}]}},info_provided_to_referring_integrations:{description:""},keywords:["db","database","mysql","maria","mariadb","sql"]},overview:'# 
MariaDB\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW ENGINE INNODB STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDB v10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDB v10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nMariaDB can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Containers" %}Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP sockets:\n\n- 127.0.0.1:3306\n- "[::1]:3306"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:"## Setup\n\n\nYou can configure the **mysql** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **mysql**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/mysql.conf` and add a job. 
|\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the following [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n- **MySQL and MariaDB < 10.5.9**\n\n  ```mysql\n  CREATE USER 'netdata'@'localhost';\n  GRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\n  FLUSH PRIVILEGES;\n  ```\n\n- **MariaDB >= 10.5.9**\n\n  For MariaDB 10.5.9 and later, use the `SLAVE MONITOR` privilege instead of `REPLICATION CLIENT`:\n\n  ```mysql\n  CREATE USER 'netdata'@'localhost';\n  GRANT USAGE, SLAVE MONITOR, PROCESS ON *.* TO 'netdata'@'localhost';\n  FLUSH PRIVILEGES;\n  ```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password.\nIt will only be able to gather statistics without being able to alter or affect operations in any way.\n\n\n#### Enable User Statistics (optional)\n\nTo collect per-user statistics, the [User Statistics](https://mariadb.com/docs/server/ha-and-performance/optimization-and-tuning/query-optimizations/statistics-for-optimizing-queries/user-statistics) plugin must be enabled.\nThis is available for **MariaDB** and **Percona**, not for MySQL.\n\nBy default, statistics are not collected. To enable the plugin, set the `userstat` system variable.\n\n- **In a configuration file** (persistent, requires restart):\n\n  ```ini\n  [mariadb]\n  userstat = 1\n  ```\n\n- **Dynamically** (takes effect immediately, does not persist across restarts):\n\n  ```mysql\n  SET GLOBAL userstat=1;\n  ```\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 5 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n|  | my.cnf | Path to a `my.cnf` file to read connection settings from the `[client]` section. |  | no |\n|  | timeout | Query timeout (seconds). | 1 | no |\n| **Functions** | functions.top_queries.disabled | Disable the [top-queries](#top-queries) function. | no | no |\n|  | functions.top_queries.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.top_queries.limit | Maximum number of queries to return. | 500 | no |\n|  | functions.deadlock_info.disabled | Disable the [deadlock-info](#deadlock-info) function. | no | no |\n|  | functions.deadlock_info.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.error_info.disabled | Disable the [error-info](#error-info) function. | no | no |\n|  | functions.error_info.timeout | Query timeout (seconds). Uses collector timeout if not set. 
|  | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **mysql** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the mysql data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _mysql_ (or scroll the list) to locate the **mysql** collector.\n5. Click the **+** next to the **mysql** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n\n##### Examples\n\n###### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n###### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n{% /details %}\n###### Connection with password\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netconfig:password@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n###### my.cnf\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netdata@tcp(127.0.0.1:3306)/\n\n  - name: remote\n    dsn: netconfig:password@tcp(203.0.113.0:3306)/\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m mysql\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m mysql -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `mysql` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep mysql\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep mysql /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep mysql\n```\n\n\",alerts:\"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size 
| current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n",functions:"## Functions\n\nThis collector exposes real-time functions for interactive troubleshooting in the Live tab.\n\n\n### Top Queries\n\nRetrieves aggregated SQL query performance metrics from MySQL [performance_schema.events_statements_summary_by_digest](https://dev.mysql.com/doc/refman/8.4/en/performance-schema-statement-summary-tables.html) table.\n\nThis function queries the `events_statements_summary_by_digest` table which contains aggregated statistics for SQL statements grouped by their digest (normalized query pattern). The function dynamically detects available columns based on your MySQL/MariaDB version.\n\nUse cases:\n- Identify slow queries that consume the most execution time\n- Find frequently executed queries that may benefit from optimization\n- Detect queries with high lock time, errors, or table scans\n\nQuery text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Mysql:top-queries` |\n| Require Cloud | yes |\n| Performance | Queries the `events_statements_summary_by_digest` table:<br/>\u2022 On busy servers with high query throughput, the digest table can grow large<br/>\u2022 Default limit of 500 rows balances usefulness with performance |\n| Security | Query text may contain unmasked literal values including potentially sensitive data:<br/>\u2022 Personal information in WHERE clauses or INSERT values<br/>\u2022 Business data and internal identifiers<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to MySQL<br/>\u2022 Performance Schema is enabled with statement digest collection<br/>\u2022 Returns HTTP 503 if collector is still initializing<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Enable performance_schema statement digest collection\n\nPerformance Schema must be enabled and statement instrumentation must be configured to collect digest statistics.\n\n1. Check if Performance Schema is enabled:\n   ```sql\n   SELECT @@performance_schema;\n   ```\n\n2. Check statement instrumentation configuration:\n   ```sql\n   SELECT * FROM performance_schema.setup_consumers\n   WHERE NAME LIKE '%statement%';\n   ```\n\n3. The following consumer should be enabled:\n   - `events_statements_summary_by_digest`\n\n4. 
Enable statement consumers if needed:\n   ```sql\n   UPDATE performance_schema.setup_consumers\n   SET ENABLED = 'YES'\n   WHERE NAME LIKE 'events_statements%';\n   ```\n\n   :::info\n\n   - Changes to `setup_consumers` take effect immediately without requiring a server restart.\n   - MariaDB also supports the `events_statements_summary_by_digest` table. Exact consumer names may vary by MariaDB version, so checking `setup_consumers` first as shown above is recommended.\n\n   :::\n\n5. Verify digest table contains data:\n   ```sql\n   SELECT COUNT(*) FROM performance_schema.events_statements_summary_by_digest;\n   ```\n\n   Note: Statement digest data is accumulated since server startup or since the table was last truncated. To reset statistics:\n   ```sql\n   TRUNCATE TABLE performance_schema.events_statements_summary_by_digest;\n   ```\n\n   Ensure that statement instruments are enabled in the Performance Schema so that statement digest statistics are collected. Refer to your MySQL or MariaDB version documentation for the appropriate configuration options.\n\n\n##### Grant SELECT permission on Performance Schema tables\n\nThe netdata user must have SELECT permission on Performance Schema tables. The standard collector permissions\n(USAGE, REPLICATION CLIENT, PROCESS) do not automatically include Performance Schema access.\n\n1. Grant the required permission:\n   ```sql\n   GRANT SELECT ON performance_schema.* TO 'netdata'@'localhost';\n   FLUSH PRIVILEGES;\n   ```\n\n   :::info\n\n   The host part (`'localhost'`) should match how the netdata user connects. If connecting via TCP/IP, you may need `'netdata'@'%'` or a specific IP address instead.\n\n   :::\n\n2. Verify access:\n   ```sql\n   -- As the netdata user:\n   SELECT COUNT(*) FROM performance_schema.events_statements_summary_by_digest;\n   ```\n\n\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. The available options depend on your MySQL/MariaDB version and include metrics like total execution time, number of calls, lock time, errors, rows examined, and more. Defaults to total execution time. | yes | totalTime |  |\n\n#### Returns\n\nAggregated statement statistics from Performance Schema, grouped by query digest. Each row represents a unique query pattern with cumulative metrics across all executions.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Digest | string |  | hidden | Unique hash identifier for the normalized query pattern. Queries with the same structure (different literal values) share the same digest. |\n| Query | string |  |  | Normalized SQL query text with literals replaced by placeholders (e.g., '?' for values). Truncated to 4096 characters. |\n| Schema | string |  |  | Database schema name where the query was executed. Empty string for queries without a schema context. |\n| Calls | integer |  |  | Total number of times this query pattern has been executed since server startup or since the digest table was last truncated. |\n| Total Time | duration | milliseconds |  | Cumulative execution time across all executions. High values indicate queries that consume significant server resources. |\n| Min Time | duration | milliseconds | hidden | Minimum execution time observed for a single execution. Helps identify variability in query performance. 
|\n| Avg Time | duration | milliseconds |  | Average execution time (total time divided by calls). Use this to compare performance across different query patterns. |\n| Max Time | duration | milliseconds | hidden | Maximum execution time observed for a single execution. Large gaps between min and max may indicate performance instability. |\n| Lock Time | duration | milliseconds |  | Total time spent waiting for table locks across all executions. High lock time may indicate contention from concurrent transactions. |\n| Errors | integer |  |  | Total number of times this query pattern resulted in an error. Non-zero values require investigation into the underlying issue. |\n| Warnings | integer |  |  | Total number of times this query pattern generated warnings. Warnings may indicate data type conversions, NULL handling issues, or other non-critical problems. |\n| Error Attribution | string |  |  | Status of error detail attribution for this query. Values: enabled (error details available), no_data (no recent error for this digest), not_enabled (statement history consumers disabled), not_supported (required columns unavailable). |\n| Error Number | integer |  |  | Most recent error number observed for this query digest (when error attribution is enabled). |\n| SQL State | string |  | hidden | SQLSTATE code for the most recent error (when error attribution is enabled). |\n| Error Message | string |  |  | Most recent error message for this query digest (when error attribution is enabled). |\n| Rows Affected | integer |  |  | Total number of rows modified by INSERT, UPDATE, DELETE, or REPLACE statements. Useful for tracking write workloads. |\n| Rows Sent | integer |  |  | Total number of rows returned to the client by SELECT statements. High values may indicate result sets that are too large. |\n| Rows Examined | integer |  |  | Total number of rows read during query execution. A high ratio of rows examined to rows sent suggests missing or inefficient indexes. |\n| Temp Disk Tables | integer |  |  | Total number of temporary tables created on disk across all executions. Disk-based temporary tables are significantly slower than in-memory tables and may indicate memory pressure or complex operations requiring sorting/grouping. |\n| Temp Tables | integer |  |  | Total number of temporary tables created (both in-memory and on-disk). High values suggest frequent sorting, grouping, or DISTINCT operations. |\n| Full Joins | integer |  |  | Total number of joins that performed a full table scan without using an index. These are typically very expensive operations that should be optimized. |\n| Full Range Joins | integer |  | hidden | Total number of joins that used a range scan on the first table. Less efficient than indexed joins but better than full scans. |\n| Select Range | integer |  | hidden | Total number of joins that used a range on the first table for row selection. |\n| Select Range Check | integer |  | hidden | Total number of joins that checked each row after scanning for key ranges. Very inefficient operation. |\n| Select Scan | integer |  |  | Total number of joins that performed a full scan of the first table. Indicates missing indexes or suboptimal join order. |\n| Sort Merge Passes | integer |  | hidden | Total number of merge passes performed during sort operations. More passes indicate larger datasets that exceed sort buffer size. |\n| Sort Range | integer |  | hidden | Total number of sorts that used a range scan. 
|\n| Sort Rows | integer |  |  | Total number of rows sorted across all executions. High values indicate frequent sorting operations on large datasets. |\n| Sort Scan | integer |  | hidden | Total number of sorts that required a full table scan. |\n| No Index Used | integer |  |  | Total number of executions where no index was used for table access. These queries are prime candidates for index optimization. |\n| No Good Index Used | integer |  | hidden | Total number of executions where a non-optimal index was used. Indicates that while an index exists, a better one might improve performance. |\n| First Seen | string |  | hidden | Timestamp when this query pattern was first observed. Helps identify new queries that may have been introduced by application changes. |\n| Last Seen | string |  | hidden | Timestamp when this query pattern was last executed. Can help identify stale queries that are no longer in use. |\n| P95 Time | duration | milliseconds |  | 95th percentile execution time. 95% of executions completed within this time. Available in MySQL 8.0+. Useful for understanding typical performance. |\n| P99 Time | duration | milliseconds |  | 99th percentile execution time. 99% of executions completed within this time. Available in MySQL 8.0+. Helps identify outlier slow executions. |\n| P99.9 Time | duration | milliseconds | hidden | 99.9th percentile execution time. Available in MySQL 8.0+. Identifies extreme outliers in query performance. |\n| Sample Query | string |  | hidden | Example of an actual query execution with literal values preserved. Available in MySQL 8.0+. Helpful for understanding the exact queries being executed. |\n| Sample Seen | string |  | hidden | Timestamp when the sample query was captured. Available in MySQL 8.0+. |\n| Sample Time | duration | milliseconds | hidden | Execution time of the captured sample query. Available in MySQL 8.0+. |\n| CPU Time | duration | milliseconds |  | Total CPU time consumed across all executions. Available in MySQL 8.0.28+. Helps identify CPU-intensive queries. |\n| Max Controlled Memory | integer |  |  | Maximum memory controlled by the query executor for this query pattern. Available in MySQL 8.0.31+. Helps identify memory-intensive operations. |\n| Max Total Memory | integer |  |  | Maximum total memory used by this query pattern including both controlled and uncontrolled allocations. Available in MySQL 8.0.31+. 
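|\n\nFor a quick manual look at the same data, the digest summary table can be queried directly. A minimal sketch (timer columns are in picoseconds; requires SELECT on `performance_schema`):\n\n```sql\n-- Top query patterns by cumulative execution time, roughly mirroring\n-- what the top-queries function returns.\nSELECT SCHEMA_NAME,\n       DIGEST_TEXT,\n       COUNT_STAR                  AS calls,\n       SUM_TIMER_WAIT / 1000000000 AS total_time_ms, -- picoseconds to ms\n       AVG_TIMER_WAIT / 1000000000 AS avg_time_ms,\n       SUM_ERRORS,\n       SUM_NO_INDEX_USED\nFROM performance_schema.events_statements_summary_by_digest\nORDER BY SUM_TIMER_WAIT DESC\nLIMIT 10;\n```\n\nThe function layers version-dependent columns (percentiles, CPU time, memory) on top of this when the server exposes them. 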
|\n\n### Deadlock Info\n\nRetrieves the latest detected InnoDB deadlock from `SHOW ENGINE INNODB STATUS`.\n\nThe output is parsed to attribute the deadlock to the participating transactions and their query text, lock mode, lock status, and wait resource.\n\nUse cases:\n- Identify which query was chosen as the deadlock victim\n- Inspect the waiting lock resource and lock mode\n- Correlate deadlocks with application changes or deployment events\n\nQuery text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Mysql:deadlock-info` |\n| Require Cloud | yes |\n| Performance | Executes `SHOW ENGINE INNODB STATUS` on demand:<br/>\u2022 Not part of regular collection<br/>\u2022 Query cost depends on server load and the size of the InnoDB status output |\n| Security | Query text and wait resource strings may include unmasked literal values including sensitive data (PII/secrets):<br/>\u2022 SQL literals such as emails, IDs, or tokens<br/>\u2022 Schema and table names that may be sensitive in some environments<br/>\u2022 Restrict dashboard access to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to MySQL<br/>\u2022 `deadlock_info_function_enabled` is true<br/>\u2022 The account can run `SHOW ENGINE INNODB STATUS` (PROCESS privilege)<br/>\u2022 Returns HTTP 200 with empty data when no deadlock is found<br/>\u2022 Returns HTTP 403 when PROCESS privilege is missing<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out<br/>\u2022 Returns HTTP 561 when the deadlock section cannot be parsed<br/>\u2022 Returns HTTP 503 if the collector is still initializing or the function is disabled |\n\n#### Prerequisites\n\n##### Enable deadlock-info function in Netdata\n\nSet `deadlock_info_function_enabled: true` in the `go.d/mysql.conf` job.\n\n\n##### Grant PROCESS privilege\n\nThe monitoring user must have PROCESS privilege to run `SHOW ENGINE INNODB STATUS`.\n\n\n\n#### Parameters\n\nThis function has no parameters.\n\n#### Returns\n\nParsed deadlock participants from the latest detected deadlock. Each row represents one transaction involved in the deadlock.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Row ID | string |  | hidden | Unique row identifier composed of deadlock ID and process ID. |\n| Deadlock ID | string |  |  | Identifier for the deadlock event, used to group participating transactions. |\n| Timestamp | timestamp |  |  | Timestamp of the deadlock event. Parsed from the deadlock section when available; otherwise the function execution time. |\n| Process ID | string |  |  | MySQL thread id of the transaction involved in the deadlock. |\n| Connection ID | integer |  |  | Numeric connection identifier when the process id is numeric. |\n| ECID | integer |  |  | Execution context id (engine-specific). This is typically null for MySQL and reserved for cross-engine consistency. |\n| Victim | string |  |  | \"true\" when the transaction was chosen as the deadlock victim and rolled back; otherwise \"false\". |\n| Query | string |  |  | SQL query text for the transaction involved in the deadlock. Truncated to 4096 characters. |\n| Lock Mode | string |  |  | Lock mode reported for the waiting lock (for example X or S). |\n| Lock Status | string |  |  | Lock status for the transaction. WAITING indicates the transaction was waiting on a lock. 
|\n| Wait Resource | string |  |  | Lock resource line from InnoDB status showing what the transaction was waiting on. |\n| Database | string |  |  | Database name when it can be inferred. This may be empty or null depending on the deadlock output. |\n\n### Error Info\n\nRetrieves recent SQL errors from Performance Schema statement history tables.\n\nThis function reads `performance_schema.events_statements_history_long` when enabled,\notherwise falls back to `performance_schema.events_statements_history`. It reports the\nmost recent error per query digest, including error number, SQLSTATE, and message.\n\nUse cases:\n- Identify recent query errors and their messages\n- Correlate errors to query patterns (digest)\n- Validate error rates seen in top-queries\n\nError messages are truncated by Performance Schema (usually 128 characters).\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Mysql:error-info` |\n| Require Cloud | yes |\n| Performance | Reads Performance Schema statement history tables on demand:<br/>\u2022 Not part of regular collection<br/>\u2022 Query cost depends on history table size and server load |\n| Security | Error messages and query text may include unmasked literals (PII/secrets).<br/>\u2022 Restrict dashboard access to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to MySQL<br/>\u2022 `error_info_function_enabled` is true<br/>\u2022 Performance Schema statement history consumers are enabled (history and/or history_long)<br/>\u2022 Returns HTTP 200 with empty data when no errors are found<br/>\u2022 Returns HTTP 503 when required consumers are not enabled or function disabled<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Enable error-info function in Netdata\n\nSet `error_info_function_enabled: true` in the `go.d/mysql.conf` job.\n\n\n##### Enable statement history consumers\n\nEnsure `events_statements_history` and/or `events_statements_history_long` consumers are enabled.\n\n\n##### Grant SELECT on Performance Schema\n\nThe monitoring user must have SELECT on `performance_schema.*` to read statement history tables.\n\n\n\n#### Parameters\n\nThis function has no parameters.\n\n#### Returns\n\nMost recent error per query digest from Performance Schema history tables.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Digest | string |  | hidden | Unique hash identifier for the normalized query pattern. |\n| Query | string |  |  | Normalized query text when available (digest text or SQL text). |\n| Schema | string |  |  | Database schema name when available. |\n| Error Number | integer |  |  | MySQL error number for the most recent error of this digest. |\n| SQL State | string |  |  | SQLSTATE code for the most recent error. |\n| Error Message | string |  |  | Error message for the most recent error. |\n\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_redo_log_activity | redo_written, checkpointed | B/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_redo_log_occupancy | occupancy | percentage | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_redo_log_checkpoint_age | age | B | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, 
tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage |   | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s |   | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s |   | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s |   | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s |   | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s |   | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s |   | \u2022 | \u2022 |\n| 
mysql.userstats_connections | created | connections/s |   | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s |   | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s |   | \u2022 | \u2022 |\n\n",integration_type:"collector",id:"go.d.plugin-mysql-MariaDB",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/mysql/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-mysql",plugin_name:"go.d.plugin",module_name:"mysql",monitored_instance:{name:"MySQL",link:"https://www.mysql.com/",categories:["data-collection.databases"],icon_filename:"mysql.svg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Containers"}]}},info_provided_to_referring_integrations:{description:""},keywords:["db","database","mysql","maria","mariadb","sql"]},overview:'# MySQL\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW ENGINE INNODB STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDB v10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDB v10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nMySQL can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Containers" %}Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP sockets:\n\n- 127.0.0.1:3306\n- "[::1]:3306"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:"## Setup\n\n\nYou can configure the **mysql** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **mysql**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/mysql.conf` and add a job. 
|\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the following [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n- **MySQL and MariaDB < 10.5.9**\n\n  ```mysql\n  CREATE USER 'netdata'@'localhost';\n  GRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\n  FLUSH PRIVILEGES;\n  ```\n\n- **MariaDB >= 10.5.9**\n\n  For MariaDB 10.5.9 and later, use the `SLAVE MONITOR` privilege instead of `REPLICATION CLIENT`:\n\n  ```mysql\n  CREATE USER 'netdata'@'localhost';\n  GRANT USAGE, SLAVE MONITOR, PROCESS ON *.* TO 'netdata'@'localhost';\n  FLUSH PRIVILEGES;\n  ```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password.\nIt will only be able to gather statistics without being able to alter or affect operations in any way.\n\n\n#### Enable User Statistics (optional)\n\nTo collect per-user statistics, the [User Statistics](https://mariadb.com/docs/server/ha-and-performance/optimization-and-tuning/query-optimizations/statistics-for-optimizing-queries/user-statistics) plugin must be enabled.\nThis is available for **MariaDB** and **Percona**, not for MySQL.\n\nBy default, statistics are not collected. To enable the plugin, set the `userstat` system variable.\n\n- **In a configuration file** (persistent, requires restart):\n\n  ```ini\n  [mariadb]\n  userstat = 1\n  ```\n\n- **Dynamically** (takes effect immediately, does not persist across restarts):\n\n  ```mysql\n  SET GLOBAL userstat=1;\n  ```\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 5 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n|  | my.cnf | Path to a `my.cnf` file to read connection settings from the `[client]` section. |  | no |\n|  | timeout | Query timeout (seconds). | 1 | no |\n| **Functions** | functions.top_queries.disabled | Disable the [top-queries](#top-queries) function. | no | no |\n|  | functions.top_queries.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.top_queries.limit | Maximum number of queries to return. | 500 | no |\n|  | functions.deadlock_info.disabled | Disable the [deadlock-info](#deadlock-info) function. | no | no |\n|  | functions.deadlock_info.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.error_info.disabled | Disable the [error-info](#error-info) function. 
| no | no |\n|  | functions.error_info.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **mysql** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the mysql data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _mysql_ (or scroll the list) to locate the **mysql** collector.\n5. Click the **+** next to the **mysql** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n\n##### Examples\n\n###### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n###### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n{% /details %}\n###### Connection with password\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netconfig:password@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n###### my.cnf\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netdata@tcp(127.0.0.1:3306)/\n\n  - name: remote\n    dsn: netconfig:password@tcp(203.0.113.0:3306)/\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m mysql\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m mysql -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `mysql` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep mysql\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep mysql /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep mysql\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size 
| current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n",functions:"## Functions\n\nThis collector exposes real-time functions for interactive troubleshooting in the Live tab.\n\n\n### Top Queries\n\nRetrieves aggregated SQL query performance metrics from MySQL [performance_schema.events_statements_summary_by_digest](https://dev.mysql.com/doc/refman/8.4/en/performance-schema-statement-summary-tables.html) table.\n\nThis function queries the `events_statements_summary_by_digest` table which contains aggregated statistics for SQL statements grouped by their digest (normalized query pattern). The function dynamically detects available columns based on your MySQL/MariaDB version.\n\nUse cases:\n- Identify slow queries that consume the most execution time\n- Find frequently executed queries that may benefit from optimization\n- Detect queries with high lock time, errors, or table scans\n\nQuery text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Mysql:top-queries` |\n| Require Cloud | yes |\n| Performance | Queries the `events_statements_summary_by_digest` table:<br/>\u2022 On busy servers with high query throughput, the digest table can grow large<br/>\u2022 Default limit of 500 rows balances usefulness with performance |\n| Security | Query text may contain unmasked literal values including potentially sensitive data:<br/>\u2022 Personal information in WHERE clauses or INSERT values<br/>\u2022 Business data and internal identifiers<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to MySQL<br/>\u2022 Performance Schema is enabled with statement digest collection<br/>\u2022 Returns HTTP 503 if collector is still initializing<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Enable performance_schema statement digest collection\n\nPerformance Schema must be enabled and statement instrumentation must be configured to collect digest statistics.\n\n1. Check if Performance Schema is enabled:\n   ```sql\n   SELECT @@performance_schema;\n   ```\n\n2. Check statement instrumentation configuration:\n   ```sql\n   SELECT * FROM performance_schema.setup_consumers\n   WHERE NAME LIKE '%statement%';\n   ```\n\n3. The following consumer should be enabled:\n   - `events_statements_summary_by_digest`\n\n4. 
Enable statement consumers if needed:\n   ```sql\n   UPDATE performance_schema.setup_consumers\n   SET ENABLED = 'YES'\n   WHERE NAME LIKE 'events_statements%';\n   ```\n\n   :::info\n\n   - Changes to `setup_consumers` take effect immediately without requiring a server restart.\n   - MariaDB also supports the `events_statements_summary_by_digest` table. Exact consumer names may vary by MariaDB version, so checking `setup_consumers` first as shown above is recommended.\n\n   :::\n\n5. Verify digest table contains data:\n   ```sql\n   SELECT COUNT(*) FROM performance_schema.events_statements_summary_by_digest;\n   ```\n\n   Note: Statement digest data is accumulated since server startup or since the table was last truncated. To reset statistics:\n   ```sql\n   TRUNCATE TABLE performance_schema.events_statements_summary_by_digest;\n   ```\n\n   Ensure that statement instruments are enabled in the Performance Schema so that statement digest statistics are collected. Refer to your MySQL or MariaDB version documentation for the appropriate configuration options.\n\n\n##### Grant SELECT permission on Performance Schema tables\n\nThe netdata user must have SELECT permission on Performance Schema tables. The standard collector permissions\n(USAGE, REPLICATION CLIENT, PROCESS) do not automatically include Performance Schema access.\n\n1. Grant the required permission:\n   ```sql\n   GRANT SELECT ON performance_schema.* TO 'netdata'@'localhost';\n   FLUSH PRIVILEGES;\n   ```\n\n   :::info\n\n   The host part (`'localhost'`) should match how the netdata user connects. If connecting via TCP/IP, you may need `'netdata'@'%'` or a specific IP address instead.\n\n   :::\n\n2. Verify access:\n   ```sql\n   -- As the netdata user:\n   SELECT COUNT(*) FROM performance_schema.events_statements_summary_by_digest;\n   ```\n\n\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. The available options depend on your MySQL/MariaDB version and include metrics like total execution time, number of calls, lock time, errors, rows examined, and more. Defaults to total execution time. | yes | totalTime |  |\n\n#### Returns\n\nAggregated statement statistics from Performance Schema, grouped by query digest. Each row represents a unique query pattern with cumulative metrics across all executions.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Digest | string |  | hidden | Unique hash identifier for the normalized query pattern. Queries with the same structure (different literal values) share the same digest. |\n| Query | string |  |  | Normalized SQL query text with literals replaced by placeholders (e.g., '?' for values). Truncated to 4096 characters. |\n| Schema | string |  |  | Database schema name where the query was executed. Empty string for queries without a schema context. |\n| Calls | integer |  |  | Total number of times this query pattern has been executed since server startup or since the digest table was last truncated. |\n| Total Time | duration | milliseconds |  | Cumulative execution time across all executions. High values indicate queries that consume significant server resources. |\n| Min Time | duration | milliseconds | hidden | Minimum execution time observed for a single execution. Helps identify variability in query performance. 
|\n| Avg Time | duration | milliseconds |  | Average execution time (total time divided by calls). Use this to compare performance across different query patterns. |\n| Max Time | duration | milliseconds | hidden | Maximum execution time observed for a single execution. Large gaps between min and max may indicate performance instability. |\n| Lock Time | duration | milliseconds |  | Total time spent waiting for table locks across all executions. High lock time may indicate contention from concurrent transactions. |\n| Errors | integer |  |  | Total number of times this query pattern resulted in an error. Non-zero values require investigation into the underlying issue. |\n| Warnings | integer |  |  | Total number of times this query pattern generated warnings. Warnings may indicate data type conversions, NULL handling issues, or other non-critical problems. |\n| Error Attribution | string |  |  | Status of error detail attribution for this query. Values: enabled (error details available), no_data (no recent error for this digest), not_enabled (statement history consumers disabled), not_supported (required columns unavailable). |\n| Error Number | integer |  |  | Most recent error number observed for this query digest (when error attribution is enabled). |\n| SQL State | string |  | hidden | SQLSTATE code for the most recent error (when error attribution is enabled). |\n| Error Message | string |  |  | Most recent error message for this query digest (when error attribution is enabled). |\n| Rows Affected | integer |  |  | Total number of rows modified by INSERT, UPDATE, DELETE, or REPLACE statements. Useful for tracking write workloads. |\n| Rows Sent | integer |  |  | Total number of rows returned to the client by SELECT statements. High values may indicate result sets that are too large. |\n| Rows Examined | integer |  |  | Total number of rows read during query execution. A high ratio of rows examined to rows sent suggests missing or inefficient indexes. |\n| Temp Disk Tables | integer |  |  | Total number of temporary tables created on disk across all executions. Disk-based temporary tables are significantly slower than in-memory tables and may indicate memory pressure or complex operations requiring sorting/grouping. |\n| Temp Tables | integer |  |  | Total number of temporary tables created (both in-memory and on-disk). High values suggest frequent sorting, grouping, or DISTINCT operations. |\n| Full Joins | integer |  |  | Total number of joins that performed a full table scan without using an index. These are typically very expensive operations that should be optimized. |\n| Full Range Joins | integer |  | hidden | Total number of joins that used a range scan on the first table. Less efficient than indexed joins but better than full scans. |\n| Select Range | integer |  | hidden | Total number of joins that used a range on the first table for row selection. |\n| Select Range Check | integer |  | hidden | Total number of joins that checked each row after scanning for key ranges. Very inefficient operation. |\n| Select Scan | integer |  |  | Total number of joins that performed a full scan of the first table. Indicates missing indexes or suboptimal join order. |\n| Sort Merge Passes | integer |  | hidden | Total number of merge passes performed during sort operations. More passes indicate larger datasets that exceed sort buffer size. |\n| Sort Range | integer |  | hidden | Total number of sorts that used a range scan. 
|\n| Sort Rows | integer |  |  | Total number of rows sorted across all executions. High values indicate frequent sorting operations on large datasets. |\n| Sort Scan | integer |  | hidden | Total number of sorts that required a full table scan. |\n| No Index Used | integer |  |  | Total number of executions where no index was used for table access. These queries are prime candidates for index optimization. |\n| No Good Index Used | integer |  | hidden | Total number of executions where a non-optimal index was used. Indicates that while an index exists, a better one might improve performance. |\n| First Seen | string |  | hidden | Timestamp when this query pattern was first observed. Helps identify new queries that may have been introduced by application changes. |\n| Last Seen | string |  | hidden | Timestamp when this query pattern was last executed. Can help identify stale queries that are no longer in use. |\n| P95 Time | duration | milliseconds |  | 95th percentile execution time. 95% of executions completed within this time. Available in MySQL 8.0+. Useful for understanding typical performance. |\n| P99 Time | duration | milliseconds |  | 99th percentile execution time. 99% of executions completed within this time. Available in MySQL 8.0+. Helps identify outlier slow executions. |\n| P99.9 Time | duration | milliseconds | hidden | 99.9th percentile execution time. Available in MySQL 8.0+. Identifies extreme outliers in query performance. |\n| Sample Query | string |  | hidden | Example of an actual query execution with literal values preserved. Available in MySQL 8.0+. Helpful for understanding the exact queries being executed. |\n| Sample Seen | string |  | hidden | Timestamp when the sample query was captured. Available in MySQL 8.0+. |\n| Sample Time | duration | milliseconds | hidden | Execution time of the captured sample query. Available in MySQL 8.0+. |\n| CPU Time | duration | milliseconds |  | Total CPU time consumed across all executions. Available in MySQL 8.0.28+. Helps identify CPU-intensive queries. |\n| Max Controlled Memory | integer |  |  | Maximum memory controlled by the query executor for this query pattern. Available in MySQL 8.0.31+. Helps identify memory-intensive operations. |\n| Max Total Memory | integer |  |  | Maximum total memory used by this query pattern including both controlled and uncontrolled allocations. Available in MySQL 8.0.31+. 
|\n\n### Deadlock Info\n\nRetrieves the latest detected InnoDB deadlock from `SHOW ENGINE INNODB STATUS`.\n\nThe output is parsed to attribute the deadlock to the participating transactions and their query text, lock mode, lock status, and wait resource.\n\nUse cases:\n- Identify which query was chosen as the deadlock victim\n- Inspect the waiting lock resource and lock mode\n- Correlate deadlocks with application changes or deployment events\n\nQuery text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Mysql:deadlock-info` |\n| Require Cloud | yes |\n| Performance | Executes `SHOW ENGINE INNODB STATUS` on demand:<br/>\u2022 Not part of regular collection<br/>\u2022 Query cost depends on server load and the size of the InnoDB status output |\n| Security | Query text and wait resource strings may include unmasked literal values including sensitive data (PII/secrets):<br/>\u2022 SQL literals such as emails, IDs, or tokens<br/>\u2022 Schema and table names that may be sensitive in some environments<br/>\u2022 Restrict dashboard access to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to MySQL<br/>\u2022 `deadlock_info_function_enabled` is true<br/>\u2022 The account can run `SHOW ENGINE INNODB STATUS` (PROCESS privilege)<br/>\u2022 Returns HTTP 200 with empty data when no deadlock is found<br/>\u2022 Returns HTTP 403 when PROCESS privilege is missing<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out<br/>\u2022 Returns HTTP 561 when the deadlock section cannot be parsed<br/>\u2022 Returns HTTP 503 if the collector is still initializing or the function is disabled |\n\n#### Prerequisites\n\n##### Enable deadlock-info function in Netdata\n\nSet `deadlock_info_function_enabled: true` in the `go.d/mysql.conf` job.\n\n\n##### Grant PROCESS privilege\n\nThe monitoring user must have PROCESS privilege to run `SHOW ENGINE INNODB STATUS`.\n\n\n\n#### Parameters\n\nThis function has no parameters.\n\n#### Returns\n\nParsed deadlock participants from the latest detected deadlock. Each row represents one transaction involved in the deadlock.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Row ID | string |  | hidden | Unique row identifier composed of deadlock ID and process ID. |\n| Deadlock ID | string |  |  | Identifier for the deadlock event, used to group participating transactions. |\n| Timestamp | timestamp |  |  | Timestamp of the deadlock event. Parsed from the deadlock section when available; otherwise the function execution time. |\n| Process ID | string |  |  | MySQL thread id of the transaction involved in the deadlock. |\n| Connection ID | integer |  |  | Numeric connection identifier when the process id is numeric. |\n| ECID | integer |  |  | Execution context id (engine-specific). This is typically null for MySQL and reserved for cross-engine consistency. |\n| Victim | string |  |  | \"true\" when the transaction was chosen as the deadlock victim and rolled back; otherwise \"false\". |\n| Query | string |  |  | SQL query text for the transaction involved in the deadlock. Truncated to 4096 characters. |\n| Lock Mode | string |  |  | Lock mode reported for the waiting lock (for example X or S). |\n| Lock Status | string |  |  | Lock status for the transaction. WAITING indicates the transaction was waiting on a lock. 
|\n| Wait Resource | string |  |  | Lock resource line from InnoDB status showing what the transaction was waiting on. |\n| Database | string |  |  | Database name when it can be inferred. This may be empty or null depending on the deadlock output. |\n\n### Error Info\n\nRetrieves recent SQL errors from Performance Schema statement history tables.\n\nThis function reads `performance_schema.events_statements_history_long` when enabled,\notherwise falls back to `performance_schema.events_statements_history`. It reports the\nmost recent error per query digest, including error number, SQLSTATE, and message.\n\nUse cases:\n- Identify recent query errors and their messages\n- Correlate errors to query patterns (digest)\n- Validate error rates seen in top-queries\n\nError messages are truncated by Performance Schema (usually 128 characters).\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Mysql:error-info` |\n| Require Cloud | yes |\n| Performance | Reads Performance Schema statement history tables on demand:<br/>\u2022 Not part of regular collection<br/>\u2022 Query cost depends on history table size and server load |\n| Security | Error messages and query text may include unmasked literals (PII/secrets).<br/>\u2022 Restrict dashboard access to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to MySQL<br/>\u2022 `error_info_function_enabled` is true<br/>\u2022 Performance Schema statement history consumers are enabled (history and/or history_long)<br/>\u2022 Returns HTTP 200 with empty data when no errors are found<br/>\u2022 Returns HTTP 503 when required consumers are not enabled or function disabled<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Enable error-info function in Netdata\n\nSet `error_info_function_enabled: true` in the `go.d/mysql.conf` job.\n\n\n##### Enable statement history consumers\n\nEnsure `events_statements_history` and/or `events_statements_history_long` consumers are enabled.\n\n\n##### Grant SELECT on Performance Schema\n\nThe monitoring user must have SELECT on `performance_schema.*` to read statement history tables.\n\n\n\n#### Parameters\n\nThis function has no parameters.\n\n#### Returns\n\nMost recent error per query digest from Performance Schema history tables.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Digest | string |  | hidden | Unique hash identifier for the normalized query pattern. |\n| Query | string |  |  | Normalized query text when available (digest text or SQL text). |\n| Schema | string |  |  | Database schema name when available. |\n| Error Number | integer |  |  | MySQL error number for the most recent error of this digest. |\n| SQL State | string |  |  | SQLSTATE code for the most recent error. |\n| Error Message | string |  |  | Error message for the most recent error. |\n\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_redo_log_activity | redo_written, checkpointed | B/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_redo_log_occupancy | occupancy | percentage | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_redo_log_checkpoint_age | age | B | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, 
tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage |   | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s |   | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s |   | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s |   | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s |   | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s |   | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s |   | \u2022 | \u2022 |\n| 
mysql.userstats_connections | created | connections/s |   | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s |   | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s |   | \u2022 | \u2022 |\n\n",integration_type:"collector",id:"go.d.plugin-mysql-MySQL",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/mysql/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-percona_mysql",plugin_name:"go.d.plugin",module_name:"mysql",monitored_instance:{name:"Percona MySQL",link:"https://www.percona.com/software/mysql-database/percona-server",icon_filename:"percona.svg",categories:["data-collection.databases"]},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Containers"}]}},info_provided_to_referring_integrations:{description:""},keywords:["db","database","mysql","maria","mariadb","sql"]},overview:'# Percona MySQL\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication, and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW ENGINE INNODB STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDB v10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDB v10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nPercona MySQL can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Containers" %}Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as the `root` and `netdata` users using known MySQL TCP sockets:\n\n- 127.0.0.1:3306\n- [::1]:3306\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:"## Setup\n\n\nYou can configure the **mysql** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **mysql**, then click **+** to add a job. 
|\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/mysql.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the following [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n- **MySQL and MariaDB < 10.5.9**\n\n  ```mysql\n  CREATE USER 'netdata'@'localhost';\n  GRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\n  FLUSH PRIVILEGES;\n  ```\n\n- **MariaDB >= 10.5.9**\n\n  For MariaDB 10.5.9 and later, use the `SLAVE MONITOR` privilege instead of `REPLICATION CLIENT`:\n\n  ```mysql\n  CREATE USER 'netdata'@'localhost';\n  GRANT USAGE, SLAVE MONITOR, PROCESS ON *.* TO 'netdata'@'localhost';\n  FLUSH PRIVILEGES;\n  ```\n\nThe `netdata` user will be able to connect to the MySQL server on localhost without a password.\nIt will only be able to gather statistics and cannot alter or affect operations in any way.\n\n\n#### Enable User Statistics (optional)\n\nTo collect per-user statistics, the [User Statistics](https://mariadb.com/docs/server/ha-and-performance/optimization-and-tuning/query-optimizations/statistics-for-optimizing-queries/user-statistics) plugin must be enabled.\nThis is available for **MariaDB** and **Percona**, not for MySQL.\n\nBy default, statistics are not collected. To enable the plugin, set the `userstat` system variable.\n\n- **In a configuration file** (persistent, requires restart):\n\n  ```ini\n  [mariadb]\n  userstat = 1\n  ```\n\n- **Dynamically** (takes effect immediately, does not persist across restarts):\n\n  ```mysql\n  SET GLOBAL userstat=1;\n  ```\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 5 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n|  | my.cnf | Path to a `my.cnf` file to read connection settings from the `[client]` section. |  | no |\n|  | timeout | Query timeout (seconds). | 1 | no |\n| **Functions** | functions.top_queries.disabled | Disable the [top-queries](#top-queries) function. | no | no |\n|  | functions.top_queries.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.top_queries.limit | Maximum number of queries to return. | 500 | no |\n|  | functions.deadlock_info.disabled | Disable the [deadlock-info](#deadlock-info) function. | no | no |\n|  | functions.deadlock_info.timeout | Query timeout (seconds). 
Uses collector timeout if not set. |  | no |\n|  | functions.error_info.disabled | Disable the [error-info](#error-info) function. | no | no |\n|  | functions.error_info.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **mysql** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the mysql data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _mysql_ (or scroll the list) to locate the **mysql** collector.\n5. Click the **+** next to the **mysql** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n\n##### Examples\n\n###### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n###### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n{% /details %}\n###### Connection with password\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netconfig:password@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n###### my.cnf\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netdata@tcp(127.0.0.1:3306)/\n\n  - name: remote\n    dsn: netconfig:password@tcp(203.0.113.0:3306)/\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m mysql\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m mysql -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `mysql` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep mysql\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep mysql /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep mysql\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes 
starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n",functions:"## Functions\n\nThis collector exposes real-time functions for interactive troubleshooting in the Live tab.\n\n\n### Top Queries\n\nRetrieves aggregated SQL query performance metrics from MySQL [performance_schema.events_statements_summary_by_digest](https://dev.mysql.com/doc/refman/8.4/en/performance-schema-statement-summary-tables.html) table.\n\nThis function queries the `events_statements_summary_by_digest` table which contains aggregated statistics for SQL statements grouped by their digest (normalized query pattern). The function dynamically detects available columns based on your MySQL/MariaDB version.\n\nUse cases:\n- Identify slow queries that consume the most execution time\n- Find frequently executed queries that may benefit from optimization\n- Detect queries with high lock time, errors, or table scans\n\nQuery text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Mysql:top-queries` |\n| Require Cloud | yes |\n| Performance | Queries the `events_statements_summary_by_digest` table:<br/>\u2022 On busy servers with high query throughput, the digest table can grow large<br/>\u2022 Default limit of 500 rows balances usefulness with performance |\n| Security | Query text may contain unmasked literal values including potentially sensitive data:<br/>\u2022 Personal information in WHERE clauses or INSERT values<br/>\u2022 Business data and internal identifiers<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to MySQL<br/>\u2022 Performance Schema is enabled with statement digest collection<br/>\u2022 Returns HTTP 503 if collector is still initializing<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Enable performance_schema statement digest collection\n\nPerformance Schema must be enabled and statement instrumentation must be configured to collect digest statistics.\n\n1. Check if Performance Schema is enabled:\n   ```sql\n   SELECT @@performance_schema;\n   ```\n\n2. Check statement instrumentation configuration:\n   ```sql\n   SELECT * FROM performance_schema.setup_consumers\n   WHERE NAME LIKE '%statement%';\n   ```\n\n3. The following consumer should be enabled:\n   - `events_statements_summary_by_digest`\n\n4. 
Enable statement consumers if needed:\n   ```sql\n   UPDATE performance_schema.setup_consumers\n   SET ENABLED = 'YES'\n   WHERE NAME LIKE 'events_statements%';\n   ```\n\n   :::info\n\n   - Changes to `setup_consumers` take effect immediately without requiring a server restart.\n   - MariaDB also supports the `events_statements_summary_by_digest` table. Exact consumer names may vary by MariaDB version, so checking `setup_consumers` first as shown above is recommended.\n\n   :::\n\n5. Verify digest table contains data:\n   ```sql\n   SELECT COUNT(*) FROM performance_schema.events_statements_summary_by_digest;\n   ```\n\n   Note: Statement digest data is accumulated since server startup or since the table was last truncated. To reset statistics:\n   ```sql\n   TRUNCATE TABLE performance_schema.events_statements_summary_by_digest;\n   ```\n\n   Ensure that statement instruments are enabled in the Performance Schema so that statement digest statistics are collected. Refer to your MySQL or MariaDB version documentation for the appropriate configuration options.\n\n\n##### Grant SELECT permission on Performance Schema tables\n\nThe netdata user must have SELECT permission on Performance Schema tables. The standard collector permissions\n(USAGE, REPLICATION CLIENT, PROCESS) do not automatically include Performance Schema access.\n\n1. Grant the required permission:\n   ```sql\n   GRANT SELECT ON performance_schema.* TO 'netdata'@'localhost';\n   FLUSH PRIVILEGES;\n   ```\n\n   :::info\n\n   The host part (`'localhost'`) should match how the netdata user connects. If connecting via TCP/IP, you may need `'netdata'@'%'` or a specific IP address instead.\n\n   :::\n\n2. Verify access:\n   ```sql\n   -- As the netdata user:\n   SELECT COUNT(*) FROM performance_schema.events_statements_summary_by_digest;\n   ```\n\n\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. The available options depend on your MySQL/MariaDB version and include metrics like total execution time, number of calls, lock time, errors, rows examined, and more. Defaults to total execution time. | yes | totalTime |  |\n\n#### Returns\n\nAggregated statement statistics from Performance Schema, grouped by query digest. Each row represents a unique query pattern with cumulative metrics across all executions.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Digest | string |  | hidden | Unique hash identifier for the normalized query pattern. Queries with the same structure (different literal values) share the same digest. |\n| Query | string |  |  | Normalized SQL query text with literals replaced by placeholders (e.g., '?' for values). Truncated to 4096 characters. |\n| Schema | string |  |  | Database schema name where the query was executed. Empty string for queries without a schema context. |\n| Calls | integer |  |  | Total number of times this query pattern has been executed since server startup or since the digest table was last truncated. |\n| Total Time | duration | milliseconds |  | Cumulative execution time across all executions. High values indicate queries that consume significant server resources. |\n| Min Time | duration | milliseconds | hidden | Minimum execution time observed for a single execution. Helps identify variability in query performance. 
|\n| Avg Time | duration | milliseconds |  | Average execution time (total time divided by calls). Use this to compare performance across different query patterns. |\n| Max Time | duration | milliseconds | hidden | Maximum execution time observed for a single execution. Large gaps between min and max may indicate performance instability. |\n| Lock Time | duration | milliseconds |  | Total time spent waiting for table locks across all executions. High lock time may indicate contention from concurrent transactions. |\n| Errors | integer |  |  | Total number of times this query pattern resulted in an error. Non-zero values require investigation into the underlying issue. |\n| Warnings | integer |  |  | Total number of times this query pattern generated warnings. Warnings may indicate data type conversions, NULL handling issues, or other non-critical problems. |\n| Error Attribution | string |  |  | Status of error detail attribution for this query. Values: enabled (error details available), no_data (no recent error for this digest), not_enabled (statement history consumers disabled), not_supported (required columns unavailable). |\n| Error Number | integer |  |  | Most recent error number observed for this query digest (when error attribution is enabled). |\n| SQL State | string |  | hidden | SQLSTATE code for the most recent error (when error attribution is enabled). |\n| Error Message | string |  |  | Most recent error message for this query digest (when error attribution is enabled). |\n| Rows Affected | integer |  |  | Total number of rows modified by INSERT, UPDATE, DELETE, or REPLACE statements. Useful for tracking write workloads. |\n| Rows Sent | integer |  |  | Total number of rows returned to the client by SELECT statements. High values may indicate result sets that are too large. |\n| Rows Examined | integer |  |  | Total number of rows read during query execution. A high ratio of rows examined to rows sent suggests missing or inefficient indexes. |\n| Temp Disk Tables | integer |  |  | Total number of temporary tables created on disk across all executions. Disk-based temporary tables are significantly slower than in-memory tables and may indicate memory pressure or complex operations requiring sorting/grouping. |\n| Temp Tables | integer |  |  | Total number of temporary tables created (both in-memory and on-disk). High values suggest frequent sorting, grouping, or DISTINCT operations. |\n| Full Joins | integer |  |  | Total number of joins that performed a full table scan without using an index. These are typically very expensive operations that should be optimized. |\n| Full Range Joins | integer |  | hidden | Total number of joins that used a range scan on the first table. Less efficient than indexed joins but better than full scans. |\n| Select Range | integer |  | hidden | Total number of joins that used a range on the first table for row selection. |\n| Select Range Check | integer |  | hidden | Total number of joins that checked each row after scanning for key ranges. Very inefficient operation. |\n| Select Scan | integer |  |  | Total number of joins that performed a full scan of the first table. Indicates missing indexes or suboptimal join order. |\n| Sort Merge Passes | integer |  | hidden | Total number of merge passes performed during sort operations. More passes indicate larger datasets that exceed sort buffer size. |\n| Sort Range | integer |  | hidden | Total number of sorts that used a range scan. 
|\n| Sort Rows | integer |  |  | Total number of rows sorted across all executions. High values indicate frequent sorting operations on large datasets. |\n| Sort Scan | integer |  | hidden | Total number of sorts that required a full table scan. |\n| No Index Used | integer |  |  | Total number of executions where no index was used for table access. These queries are prime candidates for index optimization. |\n| No Good Index Used | integer |  | hidden | Total number of executions where a non-optimal index was used. Indicates that while an index exists, a better one might improve performance. |\n| First Seen | string |  | hidden | Timestamp when this query pattern was first observed. Helps identify new queries that may have been introduced by application changes. |\n| Last Seen | string |  | hidden | Timestamp when this query pattern was last executed. Can help identify stale queries that are no longer in use. |\n| P95 Time | duration | milliseconds |  | 95th percentile execution time. 95% of executions completed within this time. Available in MySQL 8.0+. Useful for understanding typical performance. |\n| P99 Time | duration | milliseconds |  | 99th percentile execution time. 99% of executions completed within this time. Available in MySQL 8.0+. Helps identify outlier slow executions. |\n| P99.9 Time | duration | milliseconds | hidden | 99.9th percentile execution time. Available in MySQL 8.0+. Identifies extreme outliers in query performance. |\n| Sample Query | string |  | hidden | Example of an actual query execution with literal values preserved. Available in MySQL 8.0+. Helpful for understanding the exact queries being executed. |\n| Sample Seen | string |  | hidden | Timestamp when the sample query was captured. Available in MySQL 8.0+. |\n| Sample Time | duration | milliseconds | hidden | Execution time of the captured sample query. Available in MySQL 8.0+. |\n| CPU Time | duration | milliseconds |  | Total CPU time consumed across all executions. Available in MySQL 8.0.28+. Helps identify CPU-intensive queries. |\n| Max Controlled Memory | integer |  |  | Maximum memory controlled by the query executor for this query pattern. Available in MySQL 8.0.31+. Helps identify memory-intensive operations. |\n| Max Total Memory | integer |  |  | Maximum total memory used by this query pattern including both controlled and uncontrolled allocations. Available in MySQL 8.0.31+. 
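|\n\nTo spot-check the data behind these columns, you can query the digest table directly. A minimal sketch (not the collector's exact query; Performance Schema timers are in picoseconds, so dividing by 1e9 yields milliseconds):\n\n```sql\n-- Top 10 query patterns by cumulative execution time\n-- (SUM_TIMER_WAIT is in picoseconds; / 1e9 converts to milliseconds)\nSELECT DIGEST,\n       DIGEST_TEXT,\n       SCHEMA_NAME,\n       COUNT_STAR AS calls,\n       SUM_TIMER_WAIT / 1e9 AS total_time_ms\nFROM performance_schema.events_statements_summary_by_digest\nORDER BY SUM_TIMER_WAIT DESC\nLIMIT 10;\n```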
\n\n### Deadlock Info\n\nRetrieves the latest detected InnoDB deadlock from `SHOW ENGINE INNODB STATUS`.\n\nThe output is parsed to attribute the deadlock to the participating transactions and their query text, lock mode, lock status, and wait resource.\n\nUse cases:\n- Identify which query was chosen as the deadlock victim\n- Inspect the waiting lock resource and lock mode\n- Correlate deadlocks with application changes or deployment events\n\nQuery text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Mysql:deadlock-info` |\n| Require Cloud | yes |\n| Performance | Executes `SHOW ENGINE INNODB STATUS` on demand:<br/>\u2022 Not part of regular collection<br/>\u2022 Query cost depends on server load and the size of the InnoDB status output |\n| Security | Query text and wait resource strings may include unmasked literal values including sensitive data (PII/secrets):<br/>\u2022 SQL literals such as emails, IDs, or tokens<br/>\u2022 Schema and table names that may be sensitive in some environments<br/>\u2022 Restrict dashboard access to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to MySQL<br/>\u2022 `deadlock_info_function_enabled` is true<br/>\u2022 The account can run `SHOW ENGINE INNODB STATUS` (PROCESS privilege)<br/>\u2022 Returns HTTP 200 with empty data when no deadlock is found<br/>\u2022 Returns HTTP 403 when PROCESS privilege is missing<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out<br/>\u2022 Returns HTTP 561 when the deadlock section cannot be parsed<br/>\u2022 Returns HTTP 503 if the collector is still initializing or the function is disabled |\n\n#### Prerequisites\n\n##### Enable deadlock-info function in Netdata\n\nSet `deadlock_info_function_enabled: true` in the `go.d/mysql.conf` job.\n\n\n##### Grant PROCESS privilege\n\nThe monitoring user must have the PROCESS privilege to run `SHOW ENGINE INNODB STATUS` (a grant sketch is shown after the Returns table below).\n\n\n\n#### Parameters\n\nThis function has no parameters.\n\n#### Returns\n\nParsed deadlock participants from the latest detected deadlock. Each row represents one transaction involved in the deadlock.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Row ID | string |  | hidden | Unique row identifier composed of deadlock ID and process ID. |\n| Deadlock ID | string |  |  | Identifier for the deadlock event, used to group participating transactions. |\n| Timestamp | timestamp |  |  | Timestamp of the deadlock event. Parsed from the deadlock section when available; otherwise the function execution time. |\n| Process ID | string |  |  | MySQL thread ID of the transaction involved in the deadlock. |\n| Connection ID | integer |  |  | Numeric connection identifier when the process ID is numeric. |\n| ECID | integer |  |  | Execution context ID (engine-specific). This is typically null for MySQL and reserved for cross-engine consistency. |\n| Victim | string |  |  | \"true\" when the transaction was chosen as the deadlock victim and rolled back; otherwise \"false\". |\n| Query | string |  |  | SQL query text for the transaction involved in the deadlock. Truncated to 4096 characters. |\n| Lock Mode | string |  |  | Lock mode reported for the waiting lock (for example X or S). |\n| Lock Status | string |  |  | Lock status for the transaction. WAITING indicates the transaction was waiting on a lock. |\n| Wait Resource | string |  |  | Lock resource line from InnoDB status showing what the transaction was waiting on. |\n| Database | string |  |  | Database name when it can be inferred. This may be empty or null depending on the deadlock output. |
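\n\nA minimal sketch of the grant (PROCESS is a global privilege, so it is granted on `*.*`; adjust the host part to match how the monitoring user connects):\n\n```sql\n-- PROCESS is required to run SHOW ENGINE INNODB STATUS\nGRANT PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```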
\n\n### Error Info\n\nRetrieves recent SQL errors from Performance Schema statement history tables.\n\nThis function reads `performance_schema.events_statements_history_long` when enabled,\notherwise falls back to `performance_schema.events_statements_history`. It reports the\nmost recent error per query digest, including error number, SQLSTATE, and message.\n\nUse cases:\n- Identify recent query errors and their messages\n- Correlate errors to query patterns (digest)\n- Validate error rates seen in top-queries\n\nError messages are truncated by Performance Schema (usually 128 characters).\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Mysql:error-info` |\n| Require Cloud | yes |\n| Performance | Reads Performance Schema statement history tables on demand:<br/>\u2022 Not part of regular collection<br/>\u2022 Query cost depends on history table size and server load |\n| Security | Error messages and query text may include unmasked literals (PII/secrets).<br/>\u2022 Restrict dashboard access to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to MySQL<br/>\u2022 `error_info_function_enabled` is true<br/>\u2022 Performance Schema statement history consumers are enabled (history and/or history_long)<br/>\u2022 Returns HTTP 200 with empty data when no errors are found<br/>\u2022 Returns HTTP 503 when required consumers are not enabled or the function is disabled<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Enable error-info function in Netdata\n\nSet `error_info_function_enabled: true` in the `go.d/mysql.conf` job.\n\n\n##### Enable statement history consumers\n\nEnsure `events_statements_history` and/or `events_statements_history_long` consumers are enabled (a sketch is shown after the Returns table below).\n\n\n##### Grant SELECT on Performance Schema\n\nThe monitoring user must have SELECT on `performance_schema.*` to read statement history tables.\n\n\n\n#### Parameters\n\nThis function has no parameters.\n\n#### Returns\n\nMost recent error per query digest from Performance Schema history tables.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Digest | string |  | hidden | Unique hash identifier for the normalized query pattern. |\n| Query | string |  |  | Normalized query text when available (digest text or SQL text). |\n| Schema | string |  |  | Database schema name when available. |\n| Error Number | integer |  |  | MySQL error number for the most recent error of this digest. |\n| SQL State | string |  |  | SQLSTATE code for the most recent error. |\n| Error Message | string |  |  | Error message for the most recent error. |
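\n\nA sketch of enabling these consumers, following the same `setup_consumers` pattern shown earlier (consumer availability may vary by version, so check `setup_consumers` first):\n\n```sql\n-- Enable the statement history consumers read by this function\nUPDATE performance_schema.setup_consumers\nSET ENABLED = 'YES'\nWHERE NAME IN ('events_statements_history', 'events_statements_history_long');\n```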
\n\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_redo_log_activity | redo_written, checkpointed | B/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_redo_log_occupancy | occupancy | percentage | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_redo_log_checkpoint_age | age | B | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, 
tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage |   | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s |   | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s |   | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s |   | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s |   | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s |   | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s |   | \u2022 | \u2022 |\n| 
mysql.userstats_connections | created | connections/s |   | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s |   | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s |   | \u2022 | \u2022 |\n\n",integration_type:"collector",id:"go.d.plugin-mysql-Percona_MySQL",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/mysql/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-nats",plugin_name:"go.d.plugin",module_name:"nats",monitored_instance:{name:"NATS",link:"https://nats.io/",categories:["data-collection.databases"],icon_filename:"nats.svg"},related_resources:{integrations:{list:[]}},alternative_monitored_instances:[],info_provided_to_referring_integrations:{description:""},keywords:["nats","messaging","broker"]},overview:"# NATS\n\nPlugin: go.d.plugin\nModule: nats\n\n## Overview\n\nThis collector monitors the activity and performance of NATS servers.\n\n\nIt sends HTTP requests to the NATS HTTP server's dedicated [monitoring port](https://docs.nats.io/running-a-nats-service/nats_admin/monitoring#monitoring-nats).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector can automatically detect NATS instances running on:\n\n- localhost that are listening on port 8222\n- within Docker containers\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **nats** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **nats**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/nats.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable NATS monitoring\n\nSee [Enable monitoring](https://docs.nats.io/running-a-nats-service/nats_admin/monitoring#enabling-monitoring).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. 
| 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8222 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **Validation** | healthz_check | Selects the `/healthz` [endpoint mode](https://docs.nats.io/running-a-nats-service/nats_admin/monitoring#health-healthz). Options: `default` (standard check), `js-enabled-only` (error if JetStream is disabled), `js-server-only` (skip account/stream/consumer checks). | default | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **nats** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the nats data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _nats_ (or scroll the list) to locate the **nats** collector.\n5. Click the **+** next to the **nats** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/nats.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nats.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8222\n\n```\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8222\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nNATS with HTTPS enabled and a self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:8222\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8222\n\n  - name: remote\n    url: http://192.0.2.1:8222\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nats` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m nats\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m nats -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nats` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nats\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep nats /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nats\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per server\n\nThese metrics refer to NATS servers.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cluster_name | The name of the NATS cluster this server belongs to. |\n| server_id | A unique identifier for a server within the NATS cluster. |\n| server_name | The configured name of the NATS server. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nats.server_traffic | received, sent | bytes/s |\n| nats.server_messages | received, sent | messages/s |\n| nats.server_connections | active | connections |\n| nats.server_connections_rate | connections | connections/s |\n| nats.server_health_probe_status | ok, error | status |\n| nats.server_cpu_usage | used | percent |\n| nats.server_mem_usage | used | bytes |\n| nats.server_uptime | uptime | seconds |\n| nats.jetstream_streams | active | streams |\n| nats.jetstream_streams_storage_bytes | used | bytes |\n| nats.jetstream_streams_storage_messages | stored | messages |\n| nats.jetstream_consumers | active | consumers |\n| nats.jetstream_api_requests | requests | requests/s |\n| nats.jetstream_api_errors | errors | errors/s |\n| nats.jetstream_api_inflight | inflight | requests |\n| nats.jetstream_memory_used | used | bytes |\n| nats.jetstream_storage_used | used | bytes |\n\n### Per http endpoint\n\nThese metrics refer to HTTP endpoints.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cluster_name | The name of the NATS cluster this server belongs to. |\n| server_id | A unique identifier for a server within the NATS cluster. |\n| server_name | The configured name of the NATS server. |\n| http_endpoint | HTTP endpoint path. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nats.http_endpoint_requests | requests | requests/s |\n\n### Per account\n\nThese metrics refer to [Accounts](https://docs.nats.io/running-a-nats-service/nats_admin/monitoring#account-statistics).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cluster_name | The name of the NATS cluster this server belongs to. |\n| server_id | A unique identifier for a server within the NATS cluster. |\n| server_name | The configured name of the NATS server. |\n| account | Account name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nats.account_traffic | received, sent | bytes/s |\n| nats.account_messages | received, sent | messages/s |\n| nats.account_connections | active | connections |\n| nats.account_connections_rate | connections | connections/s |\n| nats.account_subscriptions | active | subscriptions |\n| nats.account_slow_consumers | slow | consumers/s |\n| nats.account_leaf_nodes | leafnode | servers |\n\n### Per route\n\nThese metrics refer to [Routes](https://docs.nats.io/running-a-nats-service/nats_admin/monitoring#route-information).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cluster_name | The name of the NATS cluster this server belongs to. |\n| server_id | A unique identifier for a server within the NATS cluster. |\n| server_name | The configured name of the NATS server. |\n| route_id | A unique identifier for a route within the NATS cluster. |\n| remote_id | The unique identifier of the remote server connected via the route. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nats.route_traffic | in, out | bytes/s |\n| nats.route_messages | in, out | messages/s |\n| nats.route_subscriptions | active | subscriptions |\n\n### Per inbound gateway connection\n\nThese metrics refer to [Inbound Gateway Connections](https://docs.nats.io/running-a-nats-service/nats_admin/monitoring#gateway-information).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cluster_name | The name of the NATS cluster this server belongs to. |\n| server_id | A unique identifier for a server within the NATS cluster. |\n| server_name | The configured name of the NATS server. |\n| gateway | The name of the local gateway. |\n| remote_gateway | The name of the remote gateway. |\n| cid | A unique identifier for the connection. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nats.inbound_gateway_conn_traffic | in, out | bytes/s |\n| nats.inbound_gateway_conn_messages | in, out | messages/s |\n| nats.inbound_gateway_conn_subscriptions | active | subscriptions |\n| nats.inbound_gateway_conn_uptime | uptime | seconds |\n\n### Per outbound gateway connection\n\nThese metrics refer to [Outbound Gateway Connections](https://docs.nats.io/running-a-nats-service/nats_admin/monitoring#gateway-information).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cluster_name | The name of the NATS cluster this server belongs to. |\n| server_id | A unique identifier for a server within the NATS cluster. |\n| server_name | The configured name of the NATS server. |\n| gateway | The name of the local gateway. |\n| remote_gateway | The name of the remote gateway. |\n| cid | A unique identifier for the connection. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nats.outbound_gateway_conn_traffic | in, out | bytes/s |\n| nats.outbound_gateway_conn_messages | in, out | messages/s |\n| nats.outbound_gateway_conn_subscriptions | active | subscriptions |\n| nats.outbound_gateway_conn_uptime | uptime | seconds |\n\n### Per leaf node connection\n\nThese metrics refer to [Leaf Node Connections](https://docs.nats.io/running-a-nats-service/nats_admin/monitoring#leaf-node-information).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cluster_name | The name of the NATS cluster this server belongs to. 
|\n| server_id | A unique identifier for a server within the NATS cluster. |\n| server_name | The configured name of the NATS server. |\n| remote_name | Unique identifier of the remote leaf node server, either its configured name or automatically assigned ID. |\n| account | Name of the associated account. |\n| ip | IP address of the remote server. |\n| port | Port used for the connection to the remote server. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nats.leaf_node_conn_traffic | in, out | bytes/s |\n| nats.leaf_node_conn_messages | in, out | messages/s |\n| nats.leaf_node_conn_subscriptions | active | subscriptions |\n| nats.leaf_node_conn_rtt | rtt | microseconds |\n\n",integration_type:"collector",id:"go.d.plugin-nats-NATS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/nats/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-nginx",plugin_name:"go.d.plugin",module_name:"nginx",monitored_instance:{name:"NGINX",link:"https://www.nginx.com/",categories:["data-collection.web-servers-and-proxies"],icon_filename:"nginx.svg"},related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"httpcheck"},{plugin_name:"go.d.plugin",module_name:"web_log"},{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Containers"}]}},alternative_monitored_instances:[],info_provided_to_referring_integrations:{description:""},keywords:["nginx","web","webserver","http","proxy"]},overview:'# NGINX\n\nPlugin: go.d.plugin\nModule: nginx\n\n## Overview\n\nThis collector monitors the activity and performance of NGINX servers, and collects metrics such as the number of connections, their status, and client requests.\n\n\nIt sends HTTP requests to the NGINX location [stub-status](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html), which is a built-in location that provides metrics about the NGINX server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nNGINX can be monitored further using the following other integrations:\n\n- {% relatedResource id="go.d.plugin-httpcheck-HTTP_Endpoints" %}HTTP Endpoints{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-web_log-Web_server_log_files" %}Web server log files{% /relatedResource %}\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Containers" %}Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects NGINX instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1/basic_status\n- http://localhost/stub_status\n- http://127.0.0.1/stub_status\n- http://127.0.0.1/nginx_status\n- http://127.0.0.1/status\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **nginx** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                       
                                                                          |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **nginx**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/nginx.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable status support\n\nConfigure [ngx_http_stub_status_module](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html).\n\n1. Add a status location\n\n   Create a small server/location that only listens on localhost and exposes `/stub_status`:\n\n   **For local monitoring (default):**\n\n   ```nginx\n     # /etc/nginx/conf.d/stub_status.conf\n     server {\n         listen 127.0.0.1:80;\n         server_name localhost 127.0.0.1;\n\n         location = /stub_status {\n             stub_status;        # exposes NGINX counters\n             access_log off;     # keep logs clean\n             allow 127.0.0.1;    # only local access\n             deny all;\n         }\n     }\n   ```\n\n    **For remote monitoring:**\n    - Change `listen` to bind to an accessible interface (e.g., `listen 80;`)\n    - Replace `allow 127.0.0.1;` with your Netdata collector\'s IP address\n    - Keep `deny all;` to block other IPs\n\n2. Reload NGINX and restart Netdata\n\n   ```bash\n   nginx -t\n   # if OK:\n   systemctl reload nginx    # or: nginx -s reload\n\n   systemctl restart netdata\n   ```\n\n   After restart, Netdata will auto-detect the endpoint and begin collecting NGINX metrics.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1/stub_status | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. 
|  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **nginx** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the nginx data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _nginx_ (or scroll the list) to locate the **nginx** collector.\n5. Click the **+** next to the **nginx** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/nginx.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginx.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/stub_status\n\n```\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/stub_status\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nNGINX with HTTPS enabled and a self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1/stub_status\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/stub_status\n\n  - name: remote\n    url: http://192.0.2.1/stub_status\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nginx` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m nginx\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m nginx -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nginx` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nginx\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep nginx /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nginx\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginx.connections | active | connections |\n| nginx.connections_status | reading, writing, idle | connections |\n| nginx.connections_accepted_handled | accepted, handled | connections/s |\n| nginx.requests | requests | requests/s |\n\n",integration_type:"collector",id:"go.d.plugin-nginx-NGINX",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/nginx/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-nginxplus",plugin_name:"go.d.plugin",module_name:"nginxplus",monitored_instance:{name:"NGINX Plus",link:"https://www.nginx.com/products/nginx/",icon_filename:"nginxplus.svg",categories:["data-collection.web-servers-and-proxies"]},keywords:["nginxplus","nginx","web","webserver","http","proxy"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# NGINX Plus\n\nPlugin: go.d.plugin\nModule: nginxplus\n\n## Overview\n\nThis collector monitors NGINX Plus servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **nginxplus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **nginxplus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/nginxplus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Config API\n\nTo configure API, see the [official documentation](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). 
| 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **nginxplus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the nginxplus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _nginxplus_ (or scroll the list) to locate the **nginxplus** collector.\n5. Click the **+** next to the **nginxplus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/nginxplus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxplus.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1\n\n```\n{% /details %}\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nNGINX Plus with HTTPS enabled and a self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1\n\n  - name: remote\n    url: http://192.0.2.1\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nginxplus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m nginxplus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m nginxplus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nginxplus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nginxplus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep nginxplus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nginxplus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX Plus instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.client_connections_rate | accepted, dropped | connections/s |\n| nginxplus.client_connections_count | active, idle | connections |\n| nginxplus.ssl_handshakes_rate | successful, failed | handshakes/s |\n| nginxplus.ssl_handshakes_failures_rate | no_common_protocol, no_common_cipher, timeout, peer_rejected_cert | failures/s |\n| nginxplus.ssl_verification_errors_rate | no_cert, expired_cert, revoked_cert, hostname_mismatch, other | errors/s |\n| nginxplus.ssl_session_reuses_rate | ssl_session | reuses/s |\n| nginxplus.http_requests_rate | requests | requests/s |\n| nginxplus.http_requests_count | requests | requests |\n| nginxplus.uptime | uptime | seconds |\n\n### Per http server zone\n\nThese metrics refer to the HTTP server zone.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| http_server_zone | HTTP server zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_server_zone_requests_rate | requests | requests/s |\n| nginxplus.http_server_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_server_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_server_zone_requests_processing_count | processing | requests |\n| nginxplus.http_server_zone_requests_discarded_rate | discarded | requests/s |\n\n### Per http location zone\n\nThese metrics refer to the HTTP location zone.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| http_location_zone | HTTP location zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_location_zone_requests_rate | requests | requests/s |\n| nginxplus.http_location_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_location_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_location_zone_requests_discarded_rate | discarded | requests/s |\n\n### Per http upstream\n\nThese metrics refer to the HTTP upstream.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| http_upstream_name | HTTP upstream name |\n| http_upstream_zone | HTTP upstream zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_upstream_peers_count | peers | peers |\n| nginxplus.http_upstream_zombies_count | zombie | servers |\n| nginxplus.http_upstream_keepalive_count | keepalive | connections |\n\n### Per http upstream server\n\nThese metrics refer to the HTTP upstream server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| http_upstream_name | HTTP upstream name |\n| http_upstream_zone | HTTP upstream zone name |\n| http_upstream_server_address | HTTP upstream server address (e.g. 
127.0.0.1:81) |\n| http_upstream_server_name | HTTP upstream server name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_upstream_server_requests_rate | requests | requests/s |\n| nginxplus.http_upstream_server_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_upstream_server_response_time | response | milliseconds |\n| nginxplus.http_upstream_server_response_header_time | header | milliseconds |\n| nginxplus.http_upstream_server_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_upstream_server_state | up, down, draining, unavail, checking, unhealthy | state |\n| nginxplus.http_upstream_server_connections_count | active | connections |\n| nginxplus.http_upstream_server_downtime | downtime | seconds |\n\n### Per http cache\n\nThese metrics refer to the HTTP cache.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| http_cache | HTTP cache name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_cache_state | warm, cold | state |\n| nginxplus.http_cache_iops | served, written, bypass | responses/s |\n| nginxplus.http_cache_io | served, written, bypass | bytes/s |\n| nginxplus.http_cache_size | size | bytes |\n\n### Per stream server zone\n\nThese metrics refer to the Stream server zone.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| stream_server_zone | Stream server zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_server_zone_connections_rate | accepted | connections/s |\n| nginxplus.stream_server_zone_sessions_per_code_class_rate | 2xx, 4xx, 5xx | sessions/s |\n| nginxplus.stream_server_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.stream_server_zone_connections_processing_count | processing | connections |\n| nginxplus.stream_server_zone_connections_discarded_rate | discarded | connections/s |\n\n### Per stream upstream\n\nThese metrics refer to the Stream upstream.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| stream_upstream_name | Stream upstream name |\n| stream_upstream_zone | Stream upstream zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_upstream_peers_count | peers | peers |\n| nginxplus.stream_upstream_zombies_count | zombie | servers |\n\n### Per stream upstream server\n\nThese metrics refer to the Stream upstream server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| stream_upstream_name | Stream upstream name |\n| stream_upstream_zone | Stream upstream zone name |\n| stream_upstream_server_address | Stream upstream server address (e.g. 
127.0.0.1:12346) |\n| stream_upstream_server_name | Stream upstream server name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_upstream_server_connections_rate | forwarded | connections/s |\n| nginxplus.stream_upstream_server_traffic_rate | received, sent | bytes/s |\n| nginxplus.stream_upstream_server_state | up, down, unavail, checking, unhealthy | state |\n| nginxplus.stream_upstream_server_downtime | downtime | seconds |\n| nginxplus.stream_upstream_server_connections_count | active | connections |\n\n### Per resolver zone\n\nThese metrics refer to the resolver zone.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| resolver_zone | resolver zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.resolver_zone_requests_rate | name, srv, addr | requests/s |\n| nginxplus.resolver_zone_responses_rate | noerror, formerr, servfail, nxdomain, notimp, refused, timedout, unknown | responses/s |\n\n",integration_type:"collector",id:"go.d.plugin-nginxplus-NGINX_Plus",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/nginxplus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-nginxunit",plugin_name:"go.d.plugin",module_name:"nginxunit",monitored_instance:{name:"NGINX Unit",link:"https://unit.nginx.org/",categories:["data-collection.web-servers-and-proxies"],icon_filename:"nginx.svg"},related_resources:{integrations:{list:[]}},alternative_monitored_instances:[],info_provided_to_referring_integrations:{description:""},keywords:["nginx","unit","web","appserver","http"]},overview:"# NGINX Unit\n\nPlugin: go.d.plugin\nModule: nginxunit\n\n## Overview\n\nThis collector monitors the activity and performance of NGINX Unit servers, and collects metrics such as the number of connections, their status, and client requests.\n\n\nIt sends HTTP requests to the NGINX Unit [Status API](https://unit.nginx.org/statusapi/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector can automatically detect NGINX Unit instances running on:\n\n- localhost that are listening on port 8000\n- within Docker containers\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **nginxunit** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **nginxunit**, then click **+** to add a job. 
|\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/nginxunit.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable HTTP Control API\n\nSee [Control API](https://unit.nginx.org/controlapi/#configuration-api) documentation.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8000 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **nginxunit** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the nginxunit data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _nginxunit_ (or scroll the list) to locate the **nginxunit** collector.\n5. Click the **+** next to the **nginxunit** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/nginxunit.conf`.\n\nThe file format is YAML. 
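A quick way to sanity-check the file after editing is to run it through any YAML parser; a minimal sketch, assuming `python3` with PyYAML is available (adjust the config path to your install):\n\n```bash\npython3 -c "import sys, yaml; yaml.safe_load(open(sys.argv[1]))" /etc/netdata/go.d/nginxunit.conf && echo OK\n```\n\n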
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxunit.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8000\n\n```\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8000\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nNGINX Unit with HTTPS enabled and a self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:8000\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8000\n\n  - name: remote\n    url: http://192.0.2.1:8000\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nginxunit` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m nginxunit\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m nginxunit -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nginxunit` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nginxunit\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nginxunit /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
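If you only need the most recent entries, you can additionally pipe the output through `tail`; a minimal sketch (the 50-line limit is arbitrary, adjust to taste):\n\n```bash\ngrep nginxunit /var/log/netdata/collector.log | tail -n 50\n```\n\n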
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nginxunit\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX Unit instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxunit.requests_rate | requests | requests/s |\n| nginxunit.connections_rate | accepted, closed | connections/s |\n| nginxunit.connections_current | active, idle | connections |\n\n",integration_type:"collector",id:"go.d.plugin-nginxunit-NGINX_Unit",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/nginxunit/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-nginxvts",plugin_name:"go.d.plugin",module_name:"nginxvts",monitored_instance:{name:"NGINX VTS",link:"https://www.nginx.com/",icon_filename:"nginx.svg",categories:["data-collection.web-servers-and-proxies"]},keywords:["webserver"],related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"web_log"},{plugin_name:"go.d.plugin",module_name:"httpcheck"},{plugin_name:"apps.plugin",module_name:"apps"}]}},info_provided_to_referring_integrations:{description:""}},overview:'# NGINX VTS\n\nPlugin: go.d.plugin\nModule: nginxvts\n\n## Overview\n\nThis collector monitors NGINX servers with [virtual host traffic status module](https://github.com/vozlt/nginx-module-vts).\n\n\nIt sends HTTP requests to the NGINX VTS location [status](https://github.com/vozlt/nginx-module-vts#synopsis), \nwhich is a built-in location that provides metrics about the NGINX VTS server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nNGINX VTS can be monitored further using the following other integrations:\n\n- {% relatedResource id="go.d.plugin-web_log-Web_server_log_files" %}Web server log files{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-httpcheck-HTTP_Endpoints" %}HTTP Endpoints{% /relatedResource %}\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects NGINX instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **nginxvts** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 
|\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **nginxvts**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/nginxvts.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Configure nginx-vts module\n\nTo configure nginx-vts, see the [installation instructions](https://github.com/vozlt/nginx-module-vts#installation).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://localhost/status/format/json | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **nginxvts** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the nginxvts data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _nginxvts_ (or scroll the list) to locate the **nginxvts** collector.\n5. Click the **+** next to the **nginxvts** collector to add a new job.\n6. 
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/nginxvts.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxvts.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/status/format/json\n\n```\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/status/format/json\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1/status/format/json\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/status/format/json\n\n  - name: remote\n    url: http://192.0.2.1/status/format/json\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nginxvts` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m nginxvts\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m nginxvts -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nginxvts` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nginxvts\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nginxvts /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nginxvts\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX VTS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxvts.requests_total | requests | requests/s |\n| nginxvts.active_connections | active | connections |\n| nginxvts.connections_total | reading, writing, waiting, accepted, handled | connections/s |\n| nginxvts.uptime | uptime | seconds |\n| nginxvts.shm_usage | max, used | bytes |\n| nginxvts.shm_used_node | used | nodes |\n| nginxvts.server_requests_total | requests | requests/s |\n| nginxvts.server_responses_total | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxvts.server_traffic_total | in, out | bytes/s |\n| nginxvts.server_cache_total | miss, bypass, expired, stale, updating, revalidated, hit, scarce | events/s |\n\n",integration_type:"collector",id:"go.d.plugin-nginxvts-NGINX_VTS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/nginxvts/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-nsd",plugin_name:"go.d.plugin",module_name:"nsd",monitored_instance:{name:"NSD",link:"https://nsd.docs.nlnetlabs.nl/en/latest",icon_filename:"nsd.svg",categories:["data-collection.networking"]},keywords:["nsd","dns"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# NSD\n\nPlugin: go.d.plugin\nModule: nsd\n\n## Overview\n\nThis collector monitors NSD statistics like queries, zones, protocols, query types and more. It relies on the [`nsd-control`](https://nsd.docs.nlnetlabs.nl/en/latest/manpages/nsd-control.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. 
This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\nExecuted commands:\n- `nsd-control stats_noreset`\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n- macOS\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **nsd** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **nsd**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/nsd.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | nsd-control binary execution timeout. | 2 | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **nsd** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the nsd data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _nsd_ (or scroll the list) to locate the **nsd** collector.\n5. Click the **+** next to the **nsd** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/nsd.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nsd.conf\n```\n\n##### Examples\n\n###### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: nsd\n    update_every: 5  # Collect NSD statistics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nsd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m nsd\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m nsd -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nsd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nsd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nsd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nsd\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NSD instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nsd.queries | queries | queries/s |\n| nsd.queries_by_type | A, NS, MD, MF, CNAME, SOA, MB, MG, MR, NULL, WKS, PTR, HINFO, MINFO, MX, TXT, RP, AFSDB, X25, ISDN, RT, NSAP, SIG, KEY, PX, AAAA, LOC, NXT, SRV, NAPTR, KX, CERT, DNAME, OPT, APL, DS, SSHFP, IPSECKEY, RRSIG, NSEC, DNSKEY, DHCID, NSEC3, NSEC3PARAM, TLSA, SMIMEA, CDS, CDNSKEY, OPENPGPKEY, CSYNC, ZONEMD, SVCB, HTTPS, SPF, NID, L32, L64, LP, EUI48, EUI64, URI, CAA, AVC, DLV, IXFR, AXFR, MAILB, MAILA, ANY | queries/s |\n| nsd.queries_by_opcode | QUERY, IQUERY, STATUS, NOTIFY, UPDATE, OTHER | queries/s |\n| nsd.queries_by_class | IN, CS, CH, HS | queries/s |\n| nsd.queries_by_protocol | udp, udp6, tcp, tcp6, tls, tls6 | queries/s |\n| nsd.answers_by_rcode | NOERROR, FORMERR, SERVFAIL, NXDOMAIN, NOTIMP, REFUSED, YXDOMAIN, YXRRSET, NXRRSET, NOTAUTH, NOTZONE, RCODE11, RCODE12, RCODE13, RCODE14, RCODE15, BADVERS | answers/s |\n| nsd.errors | query, answer | errors/s |\n| nsd.drops | query | drops/s |\n| nsd.zones | master, slave | zones |\n| nsd.zone_transfers_requests | AXFR, IXFR | requests/s |\n| nsd.zone_transfer_memory | used | bytes |\n| nsd.database_size | disk, mem | bytes |\n| nsd.uptime | uptime | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-nsd-NSD",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/nsd/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-ntpd",plugin_name:"go.d.plugin",module_name:"ntpd",monitored_instance:{name:"NTPd",link:"https://www.ntp.org/documentation/4.2.8-series/ntpd",icon_filename:"ntp.png",categories:["data-collection.networking"]},keywords:["ntpd","ntp","time"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# NTPd\n\nPlugin: go.d.plugin\nModule: ntpd\n\n## Overview\n\nThis collector monitors the system variables of the local `ntpd` daemon (optionally including 
variables of the polled peers) using the NTP Control Message Protocol via UDP socket, similar to `ntpq`, the [standard NTP query program](https://doc.ntp.org/current-stable/ntpq.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **ntpd** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **ntpd**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/ntpd.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | address | NTP server address (`IP:PORT`). | 127.0.0.1:123 | yes |\n|  | timeout | Connection, read, and write timeout (seconds). | 1 | no |\n| **Metrics Selection** | collect_peers | Collect peer metrics. | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **ntpd** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the ntpd data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _ntpd_ (or scroll the list) to locate the **ntpd** collector.\n5. Click the **+** next to the **ntpd** collector to add a new job.\n6. 
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/ntpd.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ntpd.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:123\n\n```\n{% /details %}\n###### With peers metrics\n\nCollect peers metrics.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:123\n    collect_peers: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:123\n\n  - name: remote\n    address: 203.0.113.0:123\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `ntpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m ntpd\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m ntpd -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `ntpd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep ntpd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep ntpd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep ntpd\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per NTPd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ntpd.sys_offset | offset | milliseconds |\n| ntpd.sys_jitter | system, clock | milliseconds |\n| ntpd.sys_frequency | frequency | ppm |\n| ntpd.sys_wander | clock | ppm |\n| ntpd.sys_rootdelay | delay | milliseconds |\n| ntpd.sys_rootdisp | dispersion | milliseconds |\n| ntpd.sys_stratum | stratum | stratum |\n| ntpd.sys_tc | current, minimum | log2 |\n| ntpd.sys_precision | precision | log2 |\n\n### Per peer\n\nThese metrics refer to the NTPd peer.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| peer_address | peer's source IP address |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ntpd.peer_offset | offset | milliseconds |\n| ntpd.peer_delay | delay | milliseconds |\n| ntpd.peer_dispersion | dispersion | milliseconds |\n| ntpd.peer_jitter | jitter | milliseconds |\n| ntpd.peer_xleave | xleave | milliseconds |\n| ntpd.peer_rootdelay | rootdelay | milliseconds |\n| ntpd.peer_rootdisp | dispersion | milliseconds |\n| ntpd.peer_stratum | stratum | stratum |\n| ntpd.peer_hmode | hmode | hmode |\n| ntpd.peer_pmode | pmode | pmode |\n| ntpd.peer_hpoll | hpoll | log2 |\n| ntpd.peer_ppoll | ppoll | log2 |\n| ntpd.peer_precision | precision | log2 |\n\n",integration_type:"collector",id:"go.d.plugin-ntpd-NTPd",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/ntpd/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-nvidia_smi",plugin_name:"go.d.plugin",module_name:"nvidia_smi",monitored_instance:{name:"Nvidia GPU",link:"https://www.nvidia.com/en-us/",icon_filename:"nvidia.svg",categories:["data-collection.hardware-and-sensors"]},keywords:["nvidia","gpu","hardware"],related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"dcgm"}]}},info_provided_to_referring_integrations:{description:""}},overview:'# Nvidia GPU\n\nPlugin: go.d.plugin\nModule: nvidia_smi\n\n## Overview\n\nThis collector monitors GPUs performance metrics using\nthe [nvidia-smi](https://developer.nvidia.com/nvidia-system-management-interface) CLI tool.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\nNvidia GPU can be monitored further using the following other integrations:\n\n- {% relatedResource id="go.d.plugin-dcgm-Nvidia_Data_Center_GPU_Manager_(DCGM)" %}Nvidia Data Center GPU Manager (DCGM){% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn\'t support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **nvidia_smi** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **nvidia_smi**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/nvidia_smi.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| binary_path | Path to nvidia_smi binary. The default is "nvidia_smi" and the executable is looked for in the directories specified in the PATH environment variable. | nvidia_smi | no |\n| timeout | The maximum duration, in seconds, to wait for an `nvidia-smi` command to complete. This setting applies differently based on the collector\'s mode. **Loop Mode:** In loop mode, the timeout primarily determines how long to wait for the initial `nvidia-smi` execution. If the initial query takes longer than the timeout, the collector may report an error. For systems with multiple GPUs, the initial load time can sometimes be significant (e.g., 5-10 seconds). **Regular Mode:** If the collector is in regular mode, the timeout specifies how long to wait for each individual `nvidia-smi` execution. | 10 | no |\n| loop_mode | When enabled, `nvidia-smi` is executed continuously in a separate thread using the `-l` option. | yes | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **nvidia_smi** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the nvidia_smi data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. 
The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _nvidia_smi_ (or scroll the list) to locate the **nvidia_smi** collector.\n5. Click the **+** next to the **nvidia_smi** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/nvidia_smi.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvidia_smi.conf\n```\n\n##### Examples\n\n###### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: nvidia_smi\n    binary_path: /usr/local/sbin/nvidia_smi\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nvidia_smi` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m nvidia_smi\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m nvidia_smi -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nvidia_smi` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nvidia_smi\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nvidia_smi /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
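If you only need the most recent entries, you can additionally pipe the output through `tail`; a minimal sketch (the 50-line limit is arbitrary, adjust to taste):\n\n```bash\ngrep nvidia_smi /var/log/netdata/collector.log | tail -n 50\n```\n\n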
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nvidia_smi\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| uuid | GPU uuid (e.g. GPU-27b94a00-ed54-5c24-b1fd-1054085de32a) |\n| index | GPU index (nvidia_smi typically orders GPUs by PCI bus ID) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvidia_smi.gpu_pcie_bandwidth_usage | rx, tx | B/s |\n| nvidia_smi.gpu_pcie_bandwidth_utilization | rx, tx | % |\n| nvidia_smi.gpu_fan_speed_perc | fan_speed | % |\n| nvidia_smi.gpu_utilization | gpu | % |\n| nvidia_smi.gpu_memory_utilization | memory | % |\n| nvidia_smi.gpu_decoder_utilization | decoder | % |\n| nvidia_smi.gpu_encoder_utilization | encoder | % |\n| nvidia_smi.gpu_frame_buffer_memory_usage | free, used, reserved | B |\n| nvidia_smi.gpu_bar1_memory_usage | free, used | B |\n| nvidia_smi.gpu_temperature | temperature | Celsius |\n| nvidia_smi.gpu_voltage | voltage | V |\n| nvidia_smi.gpu_clock_freq | graphics, video, sm, mem | MHz |\n| nvidia_smi.gpu_power_draw | power_draw | Watts |\n| nvidia_smi.gpu_performance_state | P0-P15 | state |\n| nvidia_smi.gpu_mig_mode_current_status | enabled, disabled | status |\n| nvidia_smi.gpu_mig_devices_count | mig | devices |\n\n### Per mig\n\nThese metrics refer to the Multi-Instance GPU (MIG).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| uuid | GPU uuid (e.g. GPU-27b94a00-ed54-5c24-b1fd-1054085de32a) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n| gpu_instance_id | GPU instance id (e.g. 1) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvidia_smi.gpu_mig_frame_buffer_memory_usage | free, used, reserved | B |\n| nvidia_smi.gpu_mig_bar1_memory_usage | free, used | B |\n\n",integration_type:"collector",id:"go.d.plugin-nvidia_smi-Nvidia_GPU",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/nvidia_smi/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-nvme",plugin_name:"go.d.plugin",module_name:"nvme",monitored_instance:{name:"NVMe devices",link:"",icon_filename:"nvme.svg",categories:["data-collection.storage"]},keywords:["nvme"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# NVMe devices\n\nPlugin: go.d.plugin\nModule: nvme\n\n## Overview\n\nThis collector monitors the health of NVMe devices. It relies on the [`nvme`](https://github.com/linux-nvme/nvme-cli#nvme-cli) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. 
This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **nvme** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **nvme**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/nvme.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install nvme-cli\n\nSee [Distro Support](https://github.com/linux-nvme/nvme-cli#distro-support). Install `nvme-cli` using your distribution\'s package manager.\n\n\n#### For Netdata running in a Docker container: grant NVMe device access\n\nYour NVMe devices need to be accessible within the Docker container for Netdata to monitor them.\n\nInclude the following option in your `docker run` command or add the device mapping in your `docker-compose.yml` file:\n\n- `docker run`\n\n  ```bash\n  --device \'/dev/nvme0n1:/dev/nvme0n1\'\n  ```\n\n- `docker-compose.yml`\n\n  ```yaml\n  services:\n    netdata:\n      devices:\n        - "/dev/nvme0n1:/dev/nvme0n1"\n  ```\n\n**Note**: Replace `/dev/nvme0n1` with your actual NVMe device name.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| timeout | nvme binary execution timeout. | 2 | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **nvme** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the nvme data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _nvme_ (or scroll the list) to locate the **nvme** collector.\n5. 
Click the **+** next to the **nvme** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/nvme.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvme.conf\n```\n\n##### Examples\n\n###### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: nvme\n    update_every: 5  # Collect NVMe metrics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nvme` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m nvme\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m nvme -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nvme` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nvme\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nvme /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
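To cut through the noise, you can narrow the filter to warnings and errors first (the pattern is illustrative, adjust as needed):\n\n```bash\n# Show only warning/error lines for the nvme collector\ngrep nvme /var/log/netdata/collector.log | grep -Ei 'warn|err'\n```\n\n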
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nvme\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ nvme_device_critical_warnings_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/nvme.conf) | nvme.device_critical_warnings_state | NVMe device ${label:device} has critical warnings |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per device\n\nThese metrics refer to the NVME device.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| device | NVMe device name |\n| model_number | NVMe device model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvme.device_estimated_endurance_perc | used | % |\n| nvme.device_available_spare_perc | spare | % |\n| nvme.device_composite_temperature | temperature | celsius |\n| nvme.device_io_transferred_count | read, written | bytes |\n| nvme.device_power_cycles_count | power | cycles |\n| nvme.device_power_on_time | power-on | seconds |\n| nvme.device_critical_warnings_state | available_spare, temp_threshold, nvm_subsystem_reliability, read_only, volatile_mem_backup_failed, persistent_memory_read_only | state |\n| nvme.device_unsafe_shutdowns_count | unsafe | shutdowns |\n| nvme.device_media_errors_rate | media | errors/s |\n| nvme.device_error_log_entries_rate | error_log | entries/s |\n| nvme.device_warning_composite_temperature_time | wctemp | seconds |\n| nvme.device_critical_composite_temperature_time | cctemp | seconds |\n| nvme.device_thermal_mgmt_temp1_transitions_rate | temp1 | transitions/s |\n| nvme.device_thermal_mgmt_temp2_transitions_rate | temp2 | transitions/s |\n| nvme.device_thermal_mgmt_temp1_time | temp1 | seconds |\n| nvme.device_thermal_mgmt_temp2_time | temp2 | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-nvme-NVMe_devices",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/nvme/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"go.d.plugin",module_name:"openldap",monitored_instance:{name:"OpenLDAP",link:"https://www.openldap.org/",categories:["data-collection.applications"],icon_filename:"openldap.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["openldap","RBAC","Directory access"]},overview:"# OpenLDAP\n\nPlugin: go.d.plugin\nModule: openldap\n\n## Overview\n\nThis collector monitors OpenLDAP metrics about connections, operations, referrals and more.\n\n\nIt gathers the metrics using the [go-ldap](https://github.com/go-ldap/ldap) module and the [Monitor backend](https://www.openldap.org/doc/admin24/monitoringslapd.html) of OpenLDAP.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector cannot auto-detect OpenLDAP instances, because credential configuration is required.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data 
collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **openldap** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **openldap**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/openldap.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable the OpenLDAP Monitor Backend\n\nFollow the instructions at https://www.openldap.org/doc/admin24/monitoringslapd.html to activate the monitoring interface.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | LDAP server URL. | ldap://127.0.0.1:389 | yes |\n|  | timeout | Connection and communication timeout (seconds). | 2 | no |\n| **Auth** | username | Distinguished Name (DN) of the user authorized to query the monitor database. |  | yes |\n|  | password | Password for the DN user. |  | yes |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **openldap** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the openldap data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _openldap_ (or scroll the list) to locate the **openldap** collector.\n5. Click the **+** next to the **openldap** collector to add a new job.\n6. 
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/openldap.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openldap.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: ldap://localhost:389\n    username: cn=netdata,dc=example,dc=com \n    password: secret\n\n```\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: ldap://localhost:389\n    username: cn=netdata,dc=example,dc=com \n    password: secret\n\n  - name: remote\n    url: ldap://192.0.2.1:389\n    username: cn=netdata,dc=example,dc=com \n    password: secret\n\n```\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `openldap` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m openldap\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m openldap -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `openldap` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep openldap\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep openldap /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
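\n\nIf the log shows bind or search errors, it can help to reproduce the query outside Netdata. A quick manual check of the Monitor backend (the bind DN and password are illustrative) is:\n\n```bash\n# Query cn=Monitor directly; if this fails, the collector will fail too\nldapsearch -x -H ldap://127.0.0.1:389 -D 'cn=netdata,dc=example,dc=com' -w secret -b 'cn=Monitor' -s base '(objectClass=*)' '+'\n```\n\n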
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep openldap\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenLDAP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openldap.current_connections | active | connections |\n| openldap.connections | connections | connections/s |\n| openldap.traffic | sent | bytes/s |\n| openldap.entries | sent | entries/s |\n| openldap.referrals | sent | referrals/s |\n| openldap.operations | completed, initiated | operations/s |\n| openldap.operations_by_type | bind, search, unbind, add, delete, modify, compare | operations/s |\n| openldap.waiters | write, read | waiters/s |\n\n",integration_type:"collector",id:"go.d.plugin-openldap-OpenLDAP",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/openldap/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-openvpn",plugin_name:"go.d.plugin",module_name:"openvpn",monitored_instance:{name:"OpenVPN",link:"https://openvpn.net/",icon_filename:"openvpn.svg",categories:["data-collection.networking"]},keywords:["openvpn","vpn"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# OpenVPN\n\nPlugin: go.d.plugin\nModule: openvpn\n\n## Overview\n\nThis collector monitors OpenVPN servers.\n\nIt uses OpenVPN [Management Interface](https://openvpn.net/community-resources/management-interface/) to collect metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **openvpn** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **openvpn**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/openvpn.conf` and add a job.                                          
                              |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable in go.d.conf\n\nThis collector is disabled by default. You need to explicitly enable it in [go.d.conf](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/config/go.d.conf).\n\nFrom the documentation for the OpenVPN Management Interface:\n> Currently, the OpenVPN daemon can at most support a single management client any one time.\n\nIt ships disabled so that it does not break other tools that use the `Management Interface`.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | address | OpenVPN server address (`IP:PORT`). | 127.0.0.1:7505 | yes |\n|  | timeout | Connection, read, write, and name resolution timeout (seconds). | 1 | no |\n| **Filters** | per_user_stats | User selector. Defines which user metrics to collect. |  | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **openvpn** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the openvpn data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _openvpn_ (or scroll the list) to locate the **openvpn** collector.\n5. Click the **+** next to the **openvpn** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/openvpn.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:7505\n\n```\n{% /details %}\n###### With user metrics\n\nCollect metrics of all users.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:7505\n    per_user_stats:\n      includes:\n        - "* *"\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:7505\n\n  - name: remote\n    address: 203.0.113.0:7505\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `openvpn` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m openvpn\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m openvpn -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `openvpn` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep openvpn\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep openvpn /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
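\n\nIf the logs show connection errors, also verify that the management interface is actually enabled on the OpenVPN server; a minimal, illustrative directive in the server configuration is:\n\n```text\n# server.conf: expose the management interface on localhost, TCP port 7505\nmanagement 127.0.0.1 7505\n```\n\n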
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep openvpn\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenVPN instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.active_clients | clients | clients |\n| openvpn.total_traffic | in, out | kilobits/s |\n\n### Per user\n\nThese metrics refer to the VPN user.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| username | VPN username |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.user_traffic | in, out | kilobits/s |\n| openvpn.user_connection_time | time | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-openvpn-OpenVPN",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/openvpn/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-openvpn_status_log",plugin_name:"go.d.plugin",module_name:"openvpn_status_log",monitored_instance:{name:"OpenVPN status log",link:"https://openvpn.net/",icon_filename:"openvpn.svg",categories:["data-collection.networking"]},keywords:["openvpn","vpn"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# OpenVPN status log\n\nPlugin: go.d.plugin\nModule: openvpn_status_log\n\n## Overview\n\nThis collector monitors an OpenVPN server.\n\nIt parses the server status log file and provides summary and per-user metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **openvpn_status_log** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **openvpn_status_log**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/openvpn_status_log.conf` and add a job.                       
                                                 |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| log_path | Path to status log. | /var/log/openvpn/status.log | yes |\n| per_user_stats | User selector. Determines which user metrics will be collected. |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **openvpn_status_log** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the openvpn_status_log data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _openvpn_status_log_ (or scroll the list) to locate the **openvpn_status_log** collector.\n5. Click the **+** next to the **openvpn_status_log** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/openvpn_status_log.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn_status_log.conf\n```\n\n##### Examples\n\n###### With user metrics\n\nCollect metrics of all users.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    per_user_stats:\n      includes:\n        - "* *"\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `openvpn_status_log` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m openvpn_status_log\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m openvpn_status_log -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `openvpn_status_log` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep openvpn_status_log\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep openvpn_status_log /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep openvpn_status_log\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenVPN status log instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.active_clients | clients | clients |\n| openvpn.total_traffic | in, out | kilobits/s |\n\n### Per user\n\nThese metrics refer to the VPN user.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| username | VPN username |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.user_traffic | in, out | kilobits/s |\n| openvpn.user_connection_time | time | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-openvpn_status_log-OpenVPN_status_log",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/openvpn_status_log/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-oracledb",plugin_name:"go.d.plugin",module_name:"oracledb",monitored_instance:{name:"Oracle DB",link:"https://www.oracle.com/database/",categories:["data-collection.databases"],icon_filename:"oracle.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["database","oracle","sql"]},overview:"# Oracle DB\n\nPlugin: go.d.plugin\nModule: oracledb\n\n## Overview\n\nThis collector monitors the health and performance of Oracle DB servers and collects general statistics, replication and user metrics.\n\n\nIt establishes a connection to the Oracle DB instance via a TCP or UNIX socket and extracts metrics from the following database tables:\n\n- `v$sysmetric`\n- `v$sysstat`\n- `v$waitclassmetric`\n- `v$system_wait_class`\n- `dba_data_files`\n- `dba_free_space`\n- `dba_segments`\n- `dba_temp_files`\n- `dba_tablespaces`\n- `v$temp_space_header`\n\nIt also provides `top-queries` and `running-queries` functions using `V$SQLSTATS` and `V$SESSION`.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector can automatically detect Oracle DB instances running on:\n\n- Localhost, listening on port 1521\n- Within Docker containers\n\n> **Note**: Oracle DB requires a username and password. 
While Netdata can automatically discover Oracle DB instances and create data collection jobs, these jobs will fail unless you provide the correct credentials.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **oracledb** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **oracledb**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/oracledb.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Create a read-only user for netdata\n\nFollow the official instructions for your Oracle RDBMS to create a read-only user for netdata. One way to do this is to connect to your Oracle database with an administrative user and execute:\n\n```sql\nCREATE USER netdata IDENTIFIED BY <PASSWORD>;\n\nGRANT CONNECT TO netdata;\nGRANT SELECT_CATALOG_ROLE TO netdata;\n```\n\nThe `top-queries` and `running-queries` functions require access to `V$SQLSTATS` and `V$SESSION`.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | dsn | Oracle server DSN (Data Source Name). Format: `oracle://username:password@host:port/service?param1=value1&...&paramN=valueN`. |  | yes |\n|  | timeout | Query timeout (seconds). | 1 | no |\n| **Functions** | functions.top_queries.disabled | Disable the [top-queries](#top-queries) function. | no | no |\n|  | functions.top_queries.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.top_queries.limit | Maximum number of queries to return. | 500 | no |\n|  | functions.running_queries.disabled | Disable the [running-queries](#running-queries) function. | no | no |\n|  | functions.running_queries.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.running_queries.limit | Maximum number of queries to return. 
| 500 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **oracledb** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the oracledb data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _oracledb_ (or scroll the list) to locate the **oracledb** collector.\n5. Click the **+** next to the **oracledb** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/oracledb.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/oracledb.conf\n```\n\n##### Examples\n\n###### TCP socket\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    dsn: oracle://netdata:secret@127.0.0.1:1521/XE\n\n```\n{% /details %}\n###### TLS connection (TCPS)\n\nAn example configuration for TLS connection.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    dsn: \'oracle://netdata:secret@127.0.0.1:1521/XE?ssl=true&ssl verify=true\'\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    dsn: oracle://netdata:secret@127.0.0.1:1521/XE\n\n  - name: remote\n    dsn: oracle://netdata:secret@203.0.113.0:1521/XE\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `oracledb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m oracledb\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m oracledb -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `oracledb` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep oracledb\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep oracledb /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep oracledb\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",functions:"## Functions\n\nThis collector exposes real-time functions for interactive troubleshooting in the Live tab.\n\n\n### Top Queries\n\nRetrieves aggregated SQL statement performance metrics from Oracle [V$SQLSTATS](https://docs.oracle.com/en/database/oracle/oracle-database/19/refrn/V-SQLSTATS.html) view.\n\nThis function queries `V$SQLSTATS` which provides SQL execution statistics aggregated across all cursors for each SQL statement. 
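\n\nA rough, illustrative sketch of the kind of query involved (the collector's actual query may differ; `FETCH FIRST` requires Oracle 12c or later):\n\n```sql\n-- Top 10 statements by cumulative elapsed time, from V$SQLSTATS\nSELECT sql_id, executions, elapsed_time, cpu_time, buffer_gets, disk_reads\nFROM v$sqlstats\nORDER BY elapsed_time DESC\nFETCH FIRST 10 ROWS ONLY;\n```\n\n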
Statistics include execution counts, timing metrics, I/O operations, and resource consumption.\n\nUse cases:\n- Identify slow queries consuming the most total execution time\n- Find queries with high buffer gets or disk reads for I/O optimization\n- Analyze CPU-intensive queries for resource tuning\n\nQuery text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Oracledb:top-queries` |\n| Require Cloud | yes |\n| Performance | Queries `V$SQLSTATS` which is a lightweight view optimized for statistics retrieval:<br/>\u2022 On busy databases with many SQL statements, the query may take longer<br/>\u2022 Default limit of 500 rows balances usefulness with performance |\n| Security | Query text may contain unmasked literal values including potentially sensitive data:<br/>\u2022 Personal information in WHERE clauses or INSERT values<br/>\u2022 Business data and internal identifiers<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to Oracle DB<br/>\u2022 The user has SELECT privilege on `V$SQLSTATS`<br/>\u2022 Returns HTTP 503 if the connection cannot be established<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Grant access to V$SQLSTATS\n\nThe monitoring user must have SELECT privilege on `V$SQLSTATS` and related views.\n\n1. Grant the required privileges:\n\n   ```sql\n   -- Note: Use V_$ (with underscore) for GRANT - this is the base fixed view\n   -- Queries use the V$ public synonym\n   GRANT SELECT ON V_$SQLSTATS TO netdata;\n   -- Or grant the broader role:\n   GRANT SELECT_CATALOG_ROLE TO netdata;\n   ```\n\n2. Verify access:\n\n   ```sql\n   SELECT COUNT(*) FROM V$SQLSTATS WHERE ROWNUM <= 1;\n   ```\n\n:::info\n\n- `V$SQLSTATS` is available in Oracle 10g and later\n- The view aggregates statistics across all child cursors for each SQL statement\n- Some columns like `MODULE` and `ACTION` require applications to set them via `DBMS_APPLICATION_INFO`\n\n:::\n\n\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. Options include total time, CPU time, executions, buffer gets, disk reads, and more. Defaults to total time to focus on most resource-intensive queries. | yes | totalTime |  |\n\n#### Returns\n\nAggregated SQL statistics from `V$SQLSTATS`. Each row represents a unique SQL statement with cumulative metrics across all executions.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| SQL ID | string |  | hidden | Unique identifier for the SQL statement in the shared pool. Can be used to find execution plans in `V$SQL_PLAN`. |\n| Query | string |  |  | SQL statement text. Truncated to 4096 characters for display purposes. |\n| Schema | string |  |  | Schema under which the SQL was parsed. Useful for identifying which application or user generated the query. |\n| Executions | integer |  |  | Total number of times this SQL statement has been executed. High values indicate frequently run queries. |\n| Total Time | duration | milliseconds |  | Cumulative elapsed time across all executions. High values indicate queries consuming significant database resources. 
|\n| Avg Time | duration | milliseconds |  | Average elapsed time per execution. Use this to compare typical performance across different SQL statements. |\n| CPU Time | duration | milliseconds |  | Cumulative CPU time consumed across all executions. Compare with total time to identify I/O-bound vs CPU-bound queries. |\n| Buffer Gets | integer |  |  | Total number of logical reads from the buffer cache. High values relative to rows processed may indicate inefficient queries. |\n| Disk Reads | integer |  |  | Total number of physical reads from disk. High values indicate queries that cannot be satisfied from the buffer cache. |\n| Rows Processed | integer |  |  | Total number of rows processed across all executions. Compare with buffer gets to assess query efficiency. |\n| Parse Calls | integer |  | hidden | Number of times the SQL was parsed (hard + soft parses). High values may indicate lack of bind variables. |\n| Module | string |  | hidden | Application module name set via `DBMS_APPLICATION_INFO`. Useful for identifying which application component generated the query. |\n| Action | string |  | hidden | Application action name set via `DBMS_APPLICATION_INFO`. Provides finer-grained identification within a module. |\n| Last Active | string |  | hidden | Timestamp when this SQL statement was last executed. Helps identify recently active vs historical queries. |\n\n### Running Queries\n\nRetrieves currently executing SQL statements from Oracle [V$SESSION](https://docs.oracle.com/en/database/oracle/oracle-database/19/refrn/V-SESSION.html) view.\n\nThis function queries `V$SESSION` joined with `V$SQL` to provide a real-time snapshot of all active user sessions currently executing SQL statements. It shows session details, elapsed time, and the SQL being executed.\n\nUse cases:\n- Identify long-running queries that may be blocking other sessions\n- Monitor active workload and session distribution\n- Debug stuck or slow queries in real-time\n\nQuery text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Oracledb:running-queries` |\n| Require Cloud | yes |\n| Performance | Queries `V$SESSION` joined with `V$SQL` for currently active sessions:<br/>\u2022 Lightweight operation as it only returns currently active user sessions<br/>\u2022 Default limit of 500 rows (rarely reached for running queries) |\n| Security | Query text may contain unmasked literal values including potentially sensitive data:<br/>\u2022 Personal information in WHERE clauses or INSERT values<br/>\u2022 Business data and credentials in query parameters<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to Oracle DB<br/>\u2022 The user has SELECT privilege on `V$SESSION` and `V$SQL`<br/>\u2022 Returns HTTP 503 if the connection cannot be established<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Grant access to V$SESSION\n\nThe monitoring user must have SELECT privilege on `V$SESSION` and `V$SQL`.\n\n1. Grant the required privileges:\n\n   ```sql\n   -- Note: Use V_$ (with underscore) for GRANT - this is the base fixed view\n   -- Queries use the V$ public synonym\n   GRANT SELECT ON V_$SESSION TO netdata;\n   GRANT SELECT ON V_$SQL TO netdata;\n   -- Or grant the broader role:\n   GRANT SELECT_CATALOG_ROLE TO netdata;\n   ```\n\n2. 
Verify access:\n\n   ```sql\n   SELECT COUNT(*) FROM V$SESSION WHERE ROWNUM <= 1;\n   ```\n\n:::info\n\n- Only USER sessions with ACTIVE status and a current SQL ID are returned\n- The elapsed time is based on `LAST_CALL_ET` which resets when a new SQL starts\n- BACKGROUND sessions (Oracle internal processes) are filtered out\n\n:::\n\n\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. Defaults to elapsed time to show longest-running queries first. | yes | lastCallMs |  |\n\n#### Returns\n\nReal-time snapshot of currently executing SQL statements. Each row represents an active user session with its current SQL.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Session | string |  |  | Session identifier in format `SID,SERIAL#`. Can be used with `ALTER SYSTEM KILL SESSION` if needed. |\n| User | string |  |  | Oracle username of the session. Useful for identifying workload by user. |\n| Status | string |  |  | Session status (ACTIVE for currently executing). Only active sessions with SQL are shown. |\n| Type | string |  | hidden | Session type (USER or BACKGROUND). This function filters to USER sessions only. |\n| SQL ID | string |  | hidden | Identifier of the currently executing SQL. Can be used to find the statement in `V$SQL`. |\n| Query | string |  |  | SQL statement text currently being executed. Truncated to 4096 characters. |\n| Elapsed | duration | milliseconds |  | Time elapsed since the session's last call started. High values indicate long-running operations that may need investigation. |\n| SQL Exec Start | string |  | hidden | Timestamp when the current SQL execution started. |\n| Module | string |  | hidden | Application module name set via `DBMS_APPLICATION_INFO`. Identifies which application is running the query. |\n| Action | string |  | hidden | Application action name set via `DBMS_APPLICATION_INFO`. |\n| Program | string |  | hidden | Client program name that established the session (e.g., sqlplus, JDBC Thin Client). |\n| Machine | string |  | hidden | Client machine name or IP address. Useful for identifying query sources. |\n\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Oracle DB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| oracledb.sessions | session | sessions |\n| oracledb.average_active_sessions | active | sessions |\n| oracledb.sessions_utilization | session_limit | percent |\n| oracledb.current_logons | logons | logons |\n| oracledb.logons | logons | logons/s |\n| oracledb.database_wait_time_ratio | db_wait_time | percent |\n| oracledb.sql_service_response_time | sql_resp_time | seconds |\n| oracledb.enqueue_timeouts | enqueue | timeouts/s |\n| oracledb.disk_io | read, written | bytes/s |\n| oracledb.disk_iops | read, write | operations/s |\n| oracledb.sorts | memory, disk | sorts/s |\n| oracledb.table_scans | short_table, long_table | scans/s |\n| oracledb.cache_hit_ratio | buffer, cursor, library, row | percent |\n| oracledb.global_cache_blocks | corrupted, lost | blocks/s |\n| oracledb.activity | parse, execute, user_commits, user_rollbacks | events/s |\n\n### Per tablespace\n\nThese metrics refer to the Tablespace.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| tablespace | Tablespace name. |\n| autoextend_status | Autoextend status (enabled, disabled, mixed). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| oracledb.tablespace_utilization | utilization | percent |\n| oracledb.tablespace_usage | avail, used | bytes |\n\n### Per wait class\n\nThese metrics refer to the [Wait Class](https://docs.oracle.com/en/database/oracle/oracle-database/19/refrn/classes-of-wait-events.html).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| wait_class | [Wait Class name](https://docs.oracle.com/en/database/oracle/oracle-database/19/refrn/classes-of-wait-events.html). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| oracledb.wait_class_wait_time | wait_time | milliseconds |\n\n",integration_type:"collector",id:"go.d.plugin-oracledb-Oracle_DB",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/oracledb/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-pgbouncer",plugin_name:"go.d.plugin",module_name:"pgbouncer",monitored_instance:{name:"PgBouncer",link:"https://www.pgbouncer.org/",icon_filename:"postgres.svg",categories:["data-collection.databases"]},keywords:["pgbouncer"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# PgBouncer\n\nPlugin: go.d.plugin\nModule: pgbouncer\n\n## Overview\n\nThis collector monitors PgBouncer servers.\n\nExecuted queries:\n\n- `SHOW VERSION;`\n- `SHOW CONFIG;`\n- `SHOW DATABASES;`\n- `SHOW STATS;`\n- `SHOW POOLS;`\n\nInformation about the queries can be found in the [PgBouncer Documentation](https://www.pgbouncer.org/usage.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **pgbouncer** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **pgbouncer**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/pgbouncer.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user listed in `stats_users` to query your PgBouncer instance.\n\nTo create the `netdata` user:\n\n- Add the `netdata` user to the `pgbouncer.ini` file:\n\n  ```text\n  stats_users = netdata\n  ```\n\n- Add a password for the `netdata` user to the `userlist.txt` file:\n\n  ```text\n  "netdata" "<PASSWORD>"\n  ```\n\n- To verify the credentials, run the following command:\n\n  ```bash\n  psql -h localhost -U netdata -p 6432 pgbouncer -c "SHOW VERSION;" >/dev/null 2>&1 && echo OK || echo FAIL\n  ```\n\n  When it prompts for a password, enter the password you added to `userlist.txt`.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 5 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | dsn | PgBouncer server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:6432/pgbouncer | yes |\n|  | timeout | Query timeout (seconds). | 1 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **pgbouncer** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the pgbouncer data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _pgbouncer_ (or scroll the list) to locate the **pgbouncer** collector.\n5. Click the **+** next to the **pgbouncer** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/pgbouncer.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pgbouncer.conf\n```\n\n##### Examples\n\n###### TCP socket\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    dsn: \'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer\'\n\n```\n{% /details %}\n###### Unix socket\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    dsn: \'host=/tmp dbname=pgbouncer user=postgres port=6432\'\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    dsn: \'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer\'\n\n  - name: remote\n    dsn: \'postgres://postgres:postgres@203.0.113.10:6432/pgbouncer\'\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `pgbouncer` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m pgbouncer\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m pgbouncer -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pgbouncer` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pgbouncer\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep pgbouncer /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
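If the file covers many restarts, you can keep only the most recent matches, for example the last 50 lines:\n\n```bash\ngrep pgbouncer /var/log/netdata/collector.log | tail -n 50\n```\n\n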
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pgbouncer\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PgBouncer instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pgbouncer.client_connections_utilization | used | percentage |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| database | database name |\n| postgres_database | Postgres database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pgbouncer.db_client_connections | active, waiting, cancel_req | connections |\n| pgbouncer.db_server_connections | active, idle, used, tested, login | connections |\n| pgbouncer.db_server_connections_utilization | used | percentage |\n| pgbouncer.db_clients_wait_time | time | seconds |\n| pgbouncer.db_client_max_wait_time | time | seconds |\n| pgbouncer.db_transactions | transactions | transactions/s |\n| pgbouncer.db_transactions_time | time | seconds |\n| pgbouncer.db_transaction_avg_time | time | seconds |\n| pgbouncer.db_queries | queries | queries/s |\n| pgbouncer.db_queries_time | time | seconds |\n| pgbouncer.db_query_avg_time | time | seconds |\n| pgbouncer.db_network_io | received, sent | B/s |\n\n",integration_type:"collector",id:"go.d.plugin-pgbouncer-PgBouncer",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/pgbouncer/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-phpdaemon",plugin_name:"go.d.plugin",module_name:"phpdaemon",monitored_instance:{name:"phpDaemon",link:"https://github.com/kakserpom/phpdaemon",icon_filename:"php.svg",categories:["data-collection.applications"]},keywords:["phpdaemon","php"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# phpDaemon\n\nPlugin: go.d.plugin\nModule: phpdaemon\n\n## Overview\n\nThis collector monitors phpDaemon instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\nYou can configure the **phpdaemon** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 
|\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **phpdaemon**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/phpdaemon.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable phpDaemon's HTTP server\n\nStatistics are expected to be in JSON format.\n\n<details>\n<summary>phpDaemon configuration</summary>\n\nInstructions from [@METAJIJI](https://github.com/METAJIJI).\n\nTo enable `phpd` statistics over HTTP, you must enable the HTTP server and write an application.\nThe application is important because the standalone application [ServerStatus.php](https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Applications/ServerStatus.php) provides statistics in HTML format, which is unusable for `netdata`.\n\n```php\n// /opt/phpdaemon/conf/phpd.conf\n\npath /opt/phpdaemon/conf/AppResolver.php;\nPool:HTTPServer {\n    privileged;\n    listen '127.0.0.1';\n    port 8509;\n}\n```\n\n```php\n// /opt/phpdaemon/conf/AppResolver.php\n\n<?php\n\nclass MyAppResolver extends \\PHPDaemon\\Core\\AppResolver {\n    public function getRequestRoute($req, $upstream) {\n        if (preg_match('~^/(ServerStatus|FullStatus)/~', $req->attrs->server['DOCUMENT_URI'], $m)) {\n            return $m[1];\n        }\n    }\n}\n\nreturn new MyAppResolver;\n```\n\n```php\n// /opt/phpdaemon/conf/PHPDaemon/Applications/FullStatus.php\n\n<?php\nnamespace PHPDaemon\\Applications;\n\nclass FullStatus extends \\PHPDaemon\\Core\\AppInstance {\n    public function beginRequest($req, $upstream) {\n        return new FullStatusRequest($this, $upstream, $req);\n    }\n}\n```\n\n```php\n// /opt/phpdaemon/conf/PHPDaemon/Applications/FullStatusRequest.php\n\n<?php\nnamespace PHPDaemon\\Applications;\n\nuse PHPDaemon\\Core\\Daemon;\nuse PHPDaemon\\HTTPRequest\\Generic;\n\nclass FullStatusRequest extends Generic {\n    public function run() {\n        $stime = microtime(true);\n        $this->header('Content-Type: application/javascript; charset=utf-8');\n\n        $stat = Daemon::getStateOfWorkers();\n        $stat['uptime'] = time() - Daemon::$startTime;\n        echo json_encode($stat);\n    }\n}\n```\n\n</details>\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8509/FullStatus | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. 
|  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **phpdaemon** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the phpdaemon data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _phpdaemon_ (or scroll the list) to locate the **phpdaemon** collector.\n5. Click the **+** next to the **phpdaemon** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/phpdaemon.conf`.\n\nThe file format is YAML. 
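\n\nBefore adding a job, you can confirm that the status endpoint set up in the prerequisites answers with JSON (this assumes the default address used throughout this page):\n\n```bash\ncurl -s http://127.0.0.1:8509/FullStatus\n```\n\n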
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpdaemon.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8509/FullStatus\n\n```\n{% /details %}\n###### HTTP authentication\n\nHTTP authentication.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8509/FullStatus\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nHTTPS with self-signed certificate.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:8509/FullStatus\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8509/FullStatus\n\n  - name: remote\n    url: http://192.0.2.1:8509/FullStatus\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `phpdaemon` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m phpdaemon\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m phpdaemon -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `phpdaemon` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep phpdaemon\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep phpdaemon /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
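To watch new entries as they arrive instead, you can follow the file (the path may differ on your system):\n\n```bash\ntail -f /var/log/netdata/collector.log | grep --line-buffered phpdaemon\n```\n\n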
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep phpdaemon\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per phpDaemon instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| phpdaemon.workers | alive, shutdown | workers |\n| phpdaemon.alive_workers | idle, busy, reloading | workers |\n| phpdaemon.idle_workers | preinit, init, initialized | workers |\n| phpdaemon.uptime | time | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-phpdaemon-phpDaemon",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/phpdaemon/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-phpfpm",plugin_name:"go.d.plugin",module_name:"phpfpm",monitored_instance:{name:"PHP-FPM",link:"https://php-fpm.org/",icon_filename:"php.svg",categories:["data-collection.web-servers-and-proxies"]},keywords:["phpfpm","php"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# PHP-FPM\n\nPlugin: go.d.plugin\nModule: phpfpm\n\n## Overview\n\nThis collector monitors PHP-FPM instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **phpfpm** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **phpfpm**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/phpfpm.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable status page\n\nUncomment the `pm.status_path = /status` variable in the `php-fpm` config file.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | PHP-FPM URL. | http://127.0.0.1/server-status?auto | yes |\n|  | address | PHP-FPM TCP listening address in IP:PORT format. Preferred over `url` if set. |  | no |\n|  | socket | PHP-FPM Unix socket. Preferred over both `url` and `address` if set. |  | no |\n|  | fcgi_path | URI path to the [FPM status page](https://www.php.net/manual/en/fpm.status.php). | /status | no |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **phpfpm** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the phpfpm data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _phpfpm_ (or scroll the list) to locate the **phpfpm** collector.\n5. Click the **+** next to the **phpfpm** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/phpfpm.conf`.\n\nThe file format is YAML. 
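\n\nBefore adding a job, you can check that the status page is reachable with the URL you plan to configure (this assumes your web server exposes `/status` as described in the prerequisites):\n\n```bash\ncurl -s "http://localhost/status?full&json"\n```\n\n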
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpfpm.conf\n```\n\n##### Examples\n\n###### HTTP\n\nCollecting data from a local instance over HTTP.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://localhost/status?full&json\n\n```\n{% /details %}\n###### Unix socket\n\nCollecting data from a local instance over Unix socket.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    socket: \'/tmp/php-fpm.sock\'\n\n```\n{% /details %}\n###### TCP socket\n\nCollecting data from a local instance over TCP socket.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:9000\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n    - name: local\n      url: http://localhost/status?full&json\n\n    - name: remote\n      url: http://203.0.113.10/status?full&json\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `phpfpm` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m phpfpm\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m phpfpm -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `phpfpm` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep phpfpm\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep phpfpm /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep phpfpm\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PHP-FPM instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| phpfpm.connections | active, max_active, idle | connections |\n| phpfpm.requests | requests | requests/s |\n| phpfpm.performance | max_children_reached, slow_requests | status |\n| phpfpm.request_duration | min, max, avg | milliseconds |\n| phpfpm.request_cpu | min, max, avg | percentage |\n| phpfpm.request_mem | min, max, avg | KB |\n\n",integration_type:"collector",id:"go.d.plugin-phpfpm-PHP-FPM",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/phpfpm/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-pihole",plugin_name:"go.d.plugin",module_name:"pihole",monitored_instance:{name:"Pi-hole",link:"https://pi-hole.net",icon_filename:"pihole.png",categories:["data-collection.networking"]},keywords:["pihole"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Pi-hole\n\nPlugin: go.d.plugin\nModule: pihole\n\n## Overview\n\nThis collector monitors Pi-hole instances using [Pi-hole API 6.0](https://ftl.pi-hole.net/master/docs/).\n\nIt collects DNS query statistics including total queries, blocked domains, query types, resolution status, and client information.\n\n**Note**: This collector is not compatible with Pi-hole versions earlier than v6.0.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Pi-hole instances running on:\n\n- localhost that are listening on port 80\n- within Docker containers\n\n> **Note that the Pi-hole API 6.0 requires a password**. 
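\n> You can verify a password against the API before configuring the collector. A minimal check against a local instance (this assumes the v6 `/api/auth` endpoint and the default HTTP port):\n>\n> ```bash\n> curl -s -X POST http://127.0.0.1/api/auth -H 'Content-Type: application/json' -d '{\"password\": \"<PASSWORD>\"}'\n> ```\n>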
\n> While Netdata can automatically detect Pi-hole instances and create data collection jobs, these jobs will fail unless you provide the necessary credentials.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **pihole** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **pihole**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/pihole.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Pi-hole v6.0 or newer\n\nThis collector requires Pi-hole v6.0 or newer as it uses the [Pi-hole API 6.0](https://ftl.pi-hole.net/master/docs/).\n\n\n#### Authentication credentials\n\nPi-hole administrator password is required for API authentication. Make sure to configure this in the collector settings even when using auto-detection.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | yes |\n|  | password | Password for Basic HTTP authentication. |  | yes |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). 
|  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **pihole** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the pihole data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _pihole_ (or scroll the list) to locate the **pihole** collector.\n5. Click the **+** next to the **pihole** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/pihole.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pihole.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1\n    password: Gv7#pQm9Xy\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nRemote instance with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://203.0.113.11\n    tls_skip_verify: yes\n    password: bT4@zK1wVr\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1\n    password: Gv7#pQm9Xy\n\n  - name: remote\n    url: http://203.0.113.10\n    password: bT4@zK1wVr\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `pihole` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m pihole\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m pihole -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pihole` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pihole\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep pihole /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pihole\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ pihole_gravity_list_last_update ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.gravity_list_last_update_time_ago | gravity.list (blocklist) file last update time |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pi-hole instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pihole.dns_queries_total | queries | queries/s |\n| pihole.dns_queries_blocked_percent | blocked | percent |\n| pihole.dns_queries_by_destination | cached, blocked, forwarded | queries/s |\n| pihole.dns_queries_by_type | A, AAA, ANY, SRV, SOA, PTR, TXT, NAPTR, MX, DS, RRSIG, DNSKEY, NS, SVCB, HTTPS, OTHER | queries/s |\n| pihole.dns_queries_by_status | UNKNOWN, GRAVITY, FORWARDED, CACHE, REGEX, DENYLIST, EXTERNAL_BLOCKED_IP, EXTERNAL_BLOCKED_NULL, EXTERNAL_BLOCKED_NXRA, GRAVITY_CNAME, REGEX_CNAME, DENYLIST_CNAME, RETRIED, RETRIED_DNSSEC, IN_PROGRESS, DBBUSY, SPECIAL_DOMAIN, CACHE_STALE, EXTERNAL_BLOCKED_EDE15 | queries/s |\n| pihole.dns_replies_by_status | UNKNOWN, NODATA, NXDOMAIN, CNAME, IP, DOMAIN, RRNAME, SERVFAIL, REFUSED, NOTIMP, DNSSEC, NONE, OTHER | replies/s |\n| pihole.active_clients | active | clients |\n| pihole.gravity_list_blocked_domains | blocked | domains |\n| pihole.gravity_list_last_update_time_ago | last_update_ago | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-pihole-Pi-hole",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/pihole/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-pika",plugin_name:"go.d.plugin",module_name:"pika",monitored_instance:{name:"Pika",link:"https://github.com/OpenAtomFoundation/pika",icon_filename:"pika.svg",categories:["data-collection.databases"]},keywords:["pika","databases"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Pika\n\nPlugin: go.d.plugin\nModule: pika\n\n## Overview\n\nThis collector monitors Pika servers.\n\nIt collects information and statistics about the server executing the following commands:\n\n- [`INFO ALL`](https://github.com/OpenAtomFoundation/pika/wiki/pika-info%E4%BF%A1%E6%81%AF%E8%AF%B4%E6%98%8E)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\nYou can configure the **pika** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **pika**, then click **+** to add a job. 
|\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/pika.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection frequency (seconds). | 5 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | address | Pika server address. | redis://@localhost:9221 | yes |\n|  | timeout | Dial (establishing new connections), read (socket reads), and write (socket writes) timeout (seconds). | 1 | no |\n| **Auth** | username | Username for authentication. |  | no |\n|  | password | Password for authentication. |  | no |\n| **TLS** | tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n|  | tls_ca | Certification authority that the client uses when verifying the server\u2019s certificates. |  | no |\n|  | tls_cert | Client TLS certificate. |  | no |\n|  | tls_key | Client TLS key. |  | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **pika** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the pika data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _pika_ (or scroll the list) to locate the **pika** collector.\n5. Click the **+** next to the **pika** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/pika.conf`.\n\nThe file format is YAML. 
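\n\nBefore adding a job, you can check that the server answers on the configured address. Pika speaks the Redis protocol, so if you have `redis-cli` installed (not required by the collector itself), a quick check looks like:\n\n```bash\nredis-cli -h 127.0.0.1 -p 9221 PING\n```\n\n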
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pika.conf\n```\n\n##### Examples\n\n###### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 'redis://@localhost:9221'\n\n```\n{% /details %}\n###### TCP socket with password\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 'redis://:password@127.0.0.1:9221'\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 'redis://:password@127.0.0.1:9221'\n\n  - name: remote\n    address: 'redis://user:password@203.0.113.0:9221'\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `pika` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m pika\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m pika -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pika` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pika\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep pika /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
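A quick way to check whether the collector has logged anything at all is to count the matches first:\n\n```bash\ngrep -c pika /var/log/netdata/collector.log\n```\n\n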
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pika\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pika instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pika.connections | accepted | connections |\n| pika.clients | connected | clients |\n| pika.memory | used | bytes |\n| pika.connected_replicas | connected | replicas |\n| pika.commands | processed | commands/s |\n| pika.commands_calls | a dimension per command | calls/s |\n| pika.database_strings_keys | a dimension per database | keys |\n| pika.database_strings_expires_keys | a dimension per database | keys |\n| pika.database_strings_invalid_keys | a dimension per database | keys |\n| pika.database_hashes_keys | a dimension per database | keys |\n| pika.database_hashes_expires_keys | a dimension per database | keys |\n| pika.database_hashes_invalid_keys | a dimension per database | keys |\n| pika.database_lists_keys | a dimension per database | keys |\n| pika.database_lists_expires_keys | a dimension per database | keys |\n| pika.database_lists_invalid_keys | a dimension per database | keys |\n| pika.database_zsets_keys | a dimension per database | keys |\n| pika.database_zsets_expires_keys | a dimension per database | keys |\n| pika.database_zsets_invalid_keys | a dimension per database | keys |\n| pika.database_sets_keys | a dimension per database | keys |\n| pika.database_sets_expires_keys | a dimension per database | keys |\n| pika.database_sets_invalid_keys | a dimension per database | keys |\n| pika.uptime | uptime | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-pika-Pika",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/pika/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-ping",plugin_name:"go.d.plugin",module_name:"ping",monitored_instance:{name:"Ping",link:"",icon_filename:"globe.svg",categories:["data-collection.synthetic-testing"]},keywords:["ping"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:'# Ping\n\nPlugin: go.d.plugin\nModule: ping\n\n## Overview\n\nThis module measures round-trip time and packet loss by sending ping messages to network hosts.\n\nThere are two operational modes:\n\n- **Privileged** (send raw ICMP ping, default). Requires the necessary permissions ([CAP_NET_RAW](https://man7.org/linux/man-pages/man7/capabilities.7.html) on Linux, `setuid` bit on other systems).\n\n  These permissions are **automatically** set during Netdata installation. However, if you need to set them manually:\n    - set `CAP_NET_RAW` (Linux only).\n      ```bash\n      sudo setcap CAP_NET_RAW=eip <INSTALL_PREFIX>/usr/libexec/netdata/plugins.d/go.d.plugin\n      ```\n    - set `setuid` bit (Other OS).\n      ```bash\n      sudo chmod 4750 <INSTALL_PREFIX>/usr/libexec/netdata/plugins.d/go.d.plugin\n      ```\n\n- **Unprivileged** (send UDP ping, Linux only). 
Requires configuring [ping_group_range](https://www.man7.org/linux/man-pages/man7/icmp.7.html):\n\n  This configuration is **not set automatically** and requires manual configuration.\n\n  ```bash\n  sudo sysctl -w net.ipv4.ping_group_range="0 2147483647"\n  ```\n\n  To persist the change add `net.ipv4.ping_group_range=0 2147483647` to `/etc/sysctl.conf` and execute `sudo sysctl -p`.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn\'t support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **ping** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **ping**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/ping.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 5 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | hosts | List of hosts to ping. | [] | yes |\n| **Ping Settings** | network | DNS resolution mode. Options: `ip` (IPv4 or IPv6), `ip4` (IPv4 only), `ip6` (IPv6 only). | ip | no |\n|  | interface | Network interface to use for ICMP echo requests (e.g., `eth0`, `wlan0`). |  | no |\n|  | privileged | Ping packet type. `yes` = raw ICMP ping, `no` = unprivileged UDP ping. | yes | yes |\n|  | packets | Number of ping packets to send per iteration. | 5 | no |\n|  | interval | Interval between sending ping packets. | 100ms | no |\n| **Jitter Settings** | jitter_ewma_samples | EWMA smoothing factor for jitter calculation. Higher values = smoother, slower response. | 16 | no |\n|  | jitter_sma_window | Number of iterations for SMA jitter calculation. 
| 10 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **ping** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the ping data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _ping_ (or scroll the list) to locate the **ping** collector.\n5. Click the **+** next to the **ping** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/ping.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ping.conf\n```\n\n##### Examples\n\n###### IPv4 hosts\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: example\n    hosts:\n      - 192.0.2.0\n      - 192.0.2.1\n\n```\n{% /details %}\n###### Unprivileged mode\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: example\n    privileged: no\n    hosts:\n      - 192.0.2.0\n      - 192.0.2.1\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: example1\n    hosts:\n      - 192.0.2.0\n      - 192.0.2.1\n\n  - name: example2\n    packets: 10\n    hosts:\n      - 192.0.2.3\n      - 192.0.2.4\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `ping` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m ping\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m ping -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `ping` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep ping\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep ping /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep ping\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ ping_host_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | network host ${label:host} reachability status |\n| [ ping_packet_loss ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | packet loss percentage to the network host ${label:host} over the last 10 minutes |\n| [ ping_host_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_rtt | average latency to the network host ${label:host} over the last 10 seconds |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per host\n\nThese metrics refer to the remote host.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| host | remote host |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ping.host_rtt | min, max, avg | milliseconds |\n| ping.host_std_dev_rtt | std_dev | milliseconds |\n| ping.host_jitter | mean, ewma, sma | milliseconds |\n| ping.host_rtt_variance | variance | ms\xb2 |\n| ping.host_packet_loss | loss | percentage |\n| ping.host_packets | received, sent | packets |\n\n",integration_type:"collector",id:"go.d.plugin-ping-Ping",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/ping/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-portcheck",plugin_name:"go.d.plugin",module_name:"portcheck",monitored_instance:{name:"TCP/UDP Endpoints",link:"",icon_filename:"globe.svg",categories:["data-collection.synthetic-testing"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# TCP/UDP Endpoints\n\nPlugin: go.d.plugin\nModule: portcheck\n\n## Overview\n\nCollector for monitoring service availability and response time. It can be used to check if specific ports are open or reachable on a target system.\n\nIt supports both TCP and UDP protocols over IPv4 and IPv6 networks.\n\n| Protocol | Check Description                                                                                                           |\n|----------|-----------------------------------------------------------------------------------------------------------------------------|\n| TCP      | Attempts to establish a TCP connection to the specified ports on the target system.                                         |\n| UDP      | Sends a 0-byte UDP packet to the specified ports on the target system and analyzes ICMP responses to determine port status. |\n\nPossible TCP statuses:\n\n| TCP Status | Description                                                 |\n|------------|-------------------------------------------------------------|\n| success    | Connection established successfully.                        |\n| timeout    | Connection timed out after waiting for configured duration. |\n| failed     | An error occurred during the connection attempt.            |\n\nPossible UDP statuses:\n\n| UDP Status    | Description                                                                                                                                                           |\n|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| open/filtered | No response received within the configured timeout. This status indicates the port is either open or filtered, but the exact state cannot be determined definitively. |\n| closed        | Received an ICMP Destination Unreachable message, indicating the port is closed.                                                                                      
|\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **portcheck** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **portcheck**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/portcheck.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection frequency (seconds). | 5 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | host | Remote host address (IPv4, IPv6, or DNS name). |  | yes |\n|  | ports | List of TCP ports to check (numeric format). | [] | no |\n|  | udp_ports | List of UDP ports to check (numeric format). | [] | no |\n|  | timeout | Port check timeout (seconds). | 2 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **portcheck** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the portcheck data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _portcheck_ (or scroll the list) to locate the **portcheck** collector.\n5. Click the **+** next to the **portcheck** collector to add a new job.\n6. 
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/portcheck.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/portcheck.conf\n```\n\n##### Examples\n\n###### Check TCP ports (IPv4)\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    host: 127.0.0.1\n    ports:\n      - 22\n      - 23\n\n```\n{% /details %}\n###### Check TCP ports (IPv6)\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    host: "[2001:DB8::1]"\n    ports:\n      - 80\n      - 8080\n\n```\n{% /details %}\n###### Check UDP ports (IPv4)\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    host: 127.0.0.1\n    udp_ports:\n      - 3120\n      - 3121\n\n```\n{% /details %}\n###### Check UDP ports (IPv6)\n\nAn example configuration. The bracketed IPv6 address is quoted so that YAML parses it as a string.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    host: "[::1]"\n    udp_ports:\n      - 3120\n      - 3121\n\n```\n{% /details %}\n
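###### Check TCP and UDP ports together\n\nA single job can combine `ports` and `udp_ports` for the same host (a minimal sketch based on the options above; the port numbers are illustrative):\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    host: 127.0.0.1\n    ports:\n      - 22\n    udp_ports:\n      - 53\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: server1\n    host: 127.0.0.1\n    ports:\n      - 22\n      - 23\n\n  - name: server2\n    host: 203.0.113.10\n    ports:\n      - 22\n      - 23\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `portcheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 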
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m portcheck\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m portcheck -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `portcheck` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep portcheck\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep portcheck /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep portcheck\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ portcheck_service_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | TCP host ${label:host} port ${label:port} liveness status |\n| [ portcheck_connection_timeouts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of timed-out TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |\n| [ portcheck_connection_fails ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of failed TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per TCP endpoint\n\nThese metrics refer to the TCP endpoint.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| host | The hostname or IP address of the target system, as specified in the configuration. |\n| port | The TCP port being monitored, as defined in the 'ports' configuration parameter. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| portcheck.status | success, failed, timeout | boolean |\n| portcheck.state_duration | time | seconds |\n| portcheck.latency | time | ms |\n\n### Per UDP endpoint\n\nThese metrics refer to the UDP endpoint.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| host | The hostname or IP address of the target system, as specified in the configuration. 
|\n| port | The UDP port being monitored, as defined in the 'udp_ports' configuration parameter. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| portcheck.udp_port_status | open/filtered, closed | status |\n| portcheck.udp_port_status_duration | time | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-portcheck-TCP/UDP_Endpoints",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/portcheck/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"go.d.plugin",module_name:"postfix",monitored_instance:{name:"Postfix",link:"https://www.postfix.org/",categories:["data-collection.applications"],icon_filename:"postfix.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["postfix","mail","mail server"]},overview:"# Postfix\n\nPlugin: go.d.plugin\nModule: postfix\n\n## Overview\n\nThis collector retrieves statistics about the Postfix mail queue using the [postqueue](https://www.postfix.org/postqueue.1.html) command-line tool.\n\n\nIt periodically executes the `postqueue -p` command. The collection interval is set to 10 seconds by default, but this is configurable.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nPostfix has internal access controls for the mail queue. By default, all users can view the queue. If your system has stricter controls, grant the `netdata` user access by adding it to `authorized_mailq_users` in the `/etc/postfix/main.cf` file. For more details, refer to the `authorized_mailq_users` setting in the [Postfix documentation](https://www.postfix.org/postconf.5.html).\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector executes `postqueue -p` to get Postfix queue statistics.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **postfix** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **postfix**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/postfix.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `postqueue` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/sbin/postqueue | yes |\n| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **postfix** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the postfix data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _postfix_ (or scroll the list) to locate the **postfix** collector.\n5. Click the **+** next to the **postfix** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/postfix.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/postfix.conf\n```\n\n##### Examples\n\n###### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: custom_path\n    binary_path: /usr/local/sbin/postqueue\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `postfix` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n
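Before launching the debugger, you can first check that the `netdata` user is able to list the mail queue at all (a quick sanity check; adjust the path if your `postqueue` binary lives elsewhere):\n\n```bash\nsudo -u netdata /usr/sbin/postqueue -p\n```\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 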
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m postfix\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m postfix -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `postfix` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep postfix\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep postfix /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep postfix\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Postfix instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postfix.qemails | emails | emails |\n| postfix.qsize | size | KiB |\n\n",integration_type:"collector",id:"go.d.plugin-postfix-Postfix",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/postfix/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-postgres",plugin_name:"go.d.plugin",module_name:"postgres",monitored_instance:{name:"PostgreSQL",link:"https://www.postgresql.org/",categories:["data-collection.databases"],icon_filename:"postgres.svg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Containers"}]}},alternative_monitored_instances:[],info_provided_to_referring_integrations:{description:""},keywords:["db","database","postgres","postgresql","sql"]},overview:'# PostgreSQL\n\nPlugin: go.d.plugin\nModule: postgres\n\n## Overview\n\nThis collector monitors the activity and performance of Postgres servers, collects replication statistics, metrics for each database, table and index, and more.\n\n\nIt establishes a connection to the Postgres instance via a TCP or UNIX socket.\nTo collect metrics for database tables and indexes, it establishes an additional connection for each discovered database.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nPostgreSQL can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Containers" %}Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known PostgreSQL TCP and UNIX sockets:\n\n- 127.0.0.1:5432\n- /var/run/postgresql/\n\n\n#### Limits\n\nTable and index metrics are not collected for databases with more than 50 tables or 250 indexes.\nThese limits can be changed in the configuration file.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:"## Setup\n\n\nYou can configure the **postgres** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **postgres**, then click **+** to add a job. 
|\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/postgres.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user with granted `pg_monitor`\nor `pg_read_all_stats` [built-in role](https://www.postgresql.org/docs/current/predefined-roles.html).\n\nTo create the `netdata` user with these permissions, execute the following in a psql session, as a user with CREATEROLE privileges:\n\n```postgresql\nCREATE USER netdata;\nGRANT pg_monitor TO netdata;\n```\n\nAfter creating the new user, restart the Netdata Agent with `sudo systemctl restart netdata`, or\nthe [appropriate method](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/start-stop-restart.md) for your\nsystem.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | dsn | Postgres connection string (DSN). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:5432/postgres | yes |\n|  | timeout | Query timeout (seconds). | 2 | no |\n| **Filters** | collect_databases_matching | Database selector. Controls which databases are included. Uses [simple patterns](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#simple-patterns-matcher). |  | no |\n| **Limits** | max_db_tables | Maximum number of tables per database to collect metrics for (0 = no limit). | 50 | no |\n|  | max_db_indexes | Maximum number of indexes per database to collect metrics for (0 = no limit). | 250 | no |\n| **Functions** | functions.top_queries.disabled | Disable the [top-queries](#top-queries) function. | no | no |\n|  | functions.top_queries.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.top_queries.limit | Maximum number of queries to return. | 500 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **postgres** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the postgres data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _postgres_ (or scroll the list) to locate the **postgres** collector.\n5. Click the **+** next to the **postgres** collector to add a new job.\n6. 
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/postgres.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/postgres.conf\n```\n\n##### Examples\n\n###### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n```\n{% /details %}\n###### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: 'host=/var/run/postgresql dbname=postgres user=netdata'\n\n```\n{% /details %}\n###### Unix socket (custom port)\n\nConnect to PostgreSQL using a Unix socket with a non-default port (5433).\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: 'host=/var/run/postgresql port=5433 dbname=postgres user=netdata'\n\n```\n{% /details %}\n
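###### Filter databases\n\nCollect metrics only for selected databases via `collect_databases_matching` (a sketch; the pattern below keeps every database except those starting with `template`, using the [simple patterns](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#simple-patterns-matcher) syntax referenced in the options table):\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n    collect_databases_matching: '!template* *'\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n  - name: remote\n    dsn: 'postgresql://netdata@203.0.113.0:5432/postgres'\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `postgres` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m postgres\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m postgres -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `postgres` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  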
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep postgres\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep postgres /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep postgres\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ postgres_total_connection_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.connections_utilization | average total connection utilization over the last minute |\n| [ postgres_acquired_locks_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.locks_utilization | average acquired locks utilization over the last minute |\n| [ postgres_txid_exhaustion_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.txid_exhaustion_perc | percent towards TXID wraparound |\n| [ postgres_db_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average cache hit ratio in db ${label:database} over the last minute |\n| [ postgres_db_transactions_rollback_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average aborted transactions percentage in db ${label:database} over the last five minutes |\n| [ postgres_db_deadlocks_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_deadlocks_rate | number of deadlocks detected in db ${label:database} in the last minute |\n| [ postgres_table_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_cache_io_ratio | average cache hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_index_cache_io_ratio | average index cache hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_toast_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_cache_io_ratio | average TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_toast_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_index_cache_io_ratio | average index TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_bloat_size_perc | bloat size 
percentage in db ${label:database} table ${label:table} |\n| [ postgres_table_last_autovacuum_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autovacuum_since_time | time elapsed since db ${label:database} table ${label:table} was vacuumed by the autovacuum daemon |\n| [ postgres_table_last_autoanalyze_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autoanalyze_since_time | time elapsed since db ${label:database} table ${label:table} was analyzed by the autovacuum daemon |\n| [ postgres_index_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.index_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} index ${label:index} |\n",functions:"## Functions\n\nThis collector exposes real-time functions for interactive troubleshooting in the Live tab.\n\n\n### Top Queries\n\nRetrieves aggregated SQL query performance metrics from PostgreSQL using either [pg_stat_monitor](https://docs.percona.com/pg-stat-monitor/) (preferred) or [pg_stat_statements](https://www.postgresql.org/docs/current/pgstatstatements.html).\n\nThe collector automatically detects which extension is available:\n- **pg_stat_monitor** (Percona): Enhanced statistics with additional columns like application name, client IP, CPU time, error info, and query classification\n- **pg_stat_statements** (standard): Core execution statistics available in all PostgreSQL installations\n\nStatistics include execution counts, timing metrics, I/O operations, and resource consumption. Columns are dynamically detected based on your PostgreSQL version and available extension.\n\nUse cases:\n- Identify slow queries consuming the most total execution time\n- Find queries with high shared block reads for I/O optimization\n- Analyze temp block usage to detect queries needing memory tuning\n- With pg_stat_monitor: Track queries by application, identify error patterns\n\nQuery text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Postgres:top-queries` |\n| Require Cloud | yes |\n| Performance | Queries `pg_stat_statements` or `pg_stat_monitor` which maintain statistics in shared memory:<br/>\u2022 On busy servers with many unique queries, the extension may consume significant memory<br/>\u2022 Default limit of 500 rows balances usefulness with performance<br/>\u2022 pg_stat_monitor uses time-based buckets which may have different memory characteristics |\n| Security | Query text may contain unmasked literal values including potentially sensitive data:<br/>\u2022 Personal information in WHERE clauses or INSERT values<br/>\u2022 Business data and internal identifiers<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 Either `pg_stat_statements` or `pg_stat_monitor` extension is installed<br/>\u2022 The collector has successfully connected to PostgreSQL<br/>\u2022 Returns HTTP 503 if no query statistics extension is installed<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Enable pg_stat_statements or pg_stat_monitor\n\nEither `pg_stat_statements` (standard) or `pg_stat_monitor` (Percona) must be installed. 
The collector auto-detects which is available, preferring pg_stat_monitor when both are present.\n\n**Option 1: pg_stat_statements (standard PostgreSQL)**\n\n1. Add to `postgresql.conf`:\n\n   ```ini\n   shared_preload_libraries = 'pg_stat_statements'\n   ```\n\n2. Restart PostgreSQL, then create the extension:\n\n   ```sql\n   CREATE EXTENSION pg_stat_statements;\n   ```\n\n**Option 2: pg_stat_monitor (Percona - recommended)**\n\nProvides additional columns: application name, client IP, CPU time, error tracking, and query classification.\n\n1. Install pg_stat_monitor (available in Percona distribution or as separate package)\n\n2. Add to `postgresql.conf`:\n\n   ```ini\n   shared_preload_libraries = 'pg_stat_monitor'\n   ```\n\n3. Restart PostgreSQL, then create the extension:\n\n   ```sql\n   CREATE EXTENSION pg_stat_monitor;\n   ```\n\n:::info\n\n- Both extensions require a server restart to load the shared library\n- Statistics can be reset with `SELECT pg_stat_statements_reset()` or `SELECT pg_stat_monitor_reset()`\n- Enable `track_io_timing` for block read/write timing metrics\n\n:::\n\n\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. Options include total time, mean time, calls, rows, shared blocks hit/read, and temp blocks written. Defaults to total time to focus on most resource-intensive queries. | yes | totalTime |  |\n\n#### Returns\n\nAggregated query statistics from `pg_stat_statements` or `pg_stat_monitor`. Each row represents a unique query pattern with cumulative metrics across all executions.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Query ID | string |  | hidden | Internal hash identifier for the normalized query. Can be used to track queries across statistics resets. |\n| Query | string |  |  | Normalized SQL query text with literals replaced by parameter placeholders. Truncated to 4096 characters. |\n| Database | string |  |  | Database name where the query was executed. |\n| User | string |  |  | PostgreSQL user who executed the query. |\n| Calls | integer |  |  | Total number of times this query pattern has been executed. High values indicate frequently run queries. |\n| Total Time | duration | milliseconds |  | Cumulative execution time across all executions. High values indicate queries consuming significant database resources. |\n| Mean Time | duration | milliseconds |  | Average execution time per call. Use this to compare typical performance across different query patterns. |\n| Min Time | duration | milliseconds | hidden | Minimum execution time observed for a single execution. |\n| Max Time | duration | milliseconds | hidden | Maximum execution time observed for a single execution. Large gaps between min and max may indicate performance variability. |\n| Stddev Time | duration | milliseconds | hidden | Standard deviation of execution time. High values indicate inconsistent query performance. |\n| Plans | integer |  | hidden | Number of times the query was planned. Available in PostgreSQL 13+. |\n| Total Plan Time | duration | milliseconds | hidden | Cumulative time spent planning the query. Available in PostgreSQL 13+. |\n| Mean Plan Time | duration | milliseconds | hidden | Average time spent planning per execution. Available in PostgreSQL 13+. |\n| Min Plan Time | duration | milliseconds | hidden | Minimum planning time observed. 
Available in PostgreSQL 13+. |\n| Max Plan Time | duration | milliseconds | hidden | Maximum planning time observed. Available in PostgreSQL 13+. |\n| Stddev Plan Time | duration | milliseconds | hidden | Standard deviation of planning time. Available in PostgreSQL 13+. |\n| Rows | integer |  |  | Total number of rows retrieved or affected across all executions. |\n| Shared Blocks Hit | integer |  |  | Total shared buffer cache hits. High values indicate good cache utilization. |\n| Shared Blocks Read | integer |  |  | Total shared blocks read from disk. High values indicate queries that bypass the cache and may benefit from more `shared_buffers`. |\n| Shared Blocks Dirtied | integer |  | hidden | Total shared blocks dirtied by the query. |\n| Shared Blocks Written | integer |  | hidden | Total shared blocks written by the query. |\n| Local Blocks Hit | integer |  | hidden | Total local buffer cache hits (temporary tables). |\n| Local Blocks Read | integer |  | hidden | Total local blocks read from disk. |\n| Local Blocks Dirtied | integer |  | hidden | Total local blocks dirtied. |\n| Local Blocks Written | integer |  | hidden | Total local blocks written. |\n| Temp Blocks Read | integer |  |  | Total temp blocks read. Non-zero values indicate queries spilling to disk due to insufficient `work_mem`. |\n| Temp Blocks Written | integer |  |  | Total temp blocks written. High values suggest increasing `work_mem` may improve performance. |\n| Block Read Time | duration | milliseconds |  | Time spent reading blocks from disk. Requires `track_io_timing` to be enabled. |\n| Block Write Time | duration | milliseconds |  | Time spent writing blocks to disk. Requires `track_io_timing` to be enabled. |\n| WAL Records | integer |  | hidden | Total number of WAL records generated. Available in PostgreSQL 13+. |\n| WAL Full Page Images | integer |  | hidden | Total number of WAL full page images generated. Available in PostgreSQL 13+. |\n| WAL Bytes | integer |  | hidden | Total bytes of WAL generated. Available in PostgreSQL 13+. |\n| JIT Functions | integer |  | hidden | Total number of functions JIT-compiled. Available in PostgreSQL 15+. |\n| JIT Generation Time | duration | milliseconds | hidden | Time spent generating JIT code. Available in PostgreSQL 15+. |\n| JIT Inlining Count | integer |  | hidden | Number of times JIT inlining was performed. Available in PostgreSQL 15+. |\n| JIT Inlining Time | duration | milliseconds | hidden | Time spent on JIT inlining. Available in PostgreSQL 15+. |\n| JIT Optimization Count | integer |  | hidden | Number of times JIT optimization was performed. Available in PostgreSQL 15+. |\n| JIT Optimization Time | duration | milliseconds | hidden | Time spent on JIT optimization. Available in PostgreSQL 15+. |\n| JIT Emission Count | integer |  | hidden | Number of times JIT code was emitted. Available in PostgreSQL 15+. |\n| JIT Emission Time | duration | milliseconds | hidden | Time spent emitting JIT code. Available in PostgreSQL 15+. |\n| Temp Block Read Time | duration | milliseconds | hidden | Time spent reading temp blocks. Available in PostgreSQL 15+. Requires `track_io_timing`. |\n| Temp Block Write Time | duration | milliseconds | hidden | Time spent writing temp blocks. Available in PostgreSQL 15+. Requires `track_io_timing`. |\n| Application Name | string |  |  | Name of the application that executed the query. Available with pg_stat_monitor only. |\n| Client IP | string |  | hidden | IP address of the client that executed the query. 
Available with pg_stat_monitor only. |\n| Command Type | string |  |  | Type of SQL command (SELECT, INSERT, UPDATE, DELETE, etc.). Available with pg_stat_monitor only. |\n| Comments | string |  | hidden | SQL comments extracted from the query. Available with pg_stat_monitor only. |\n| Relations | string |  | hidden | Tables/relations involved in the query. Available with pg_stat_monitor only. |\n| CPU User Time | duration | milliseconds | hidden | CPU time spent in user mode. Available with pg_stat_monitor only. |\n| CPU System Time | duration | milliseconds | hidden | CPU time spent in system/kernel mode. Available with pg_stat_monitor only. |\n| Error Level | integer |  | hidden | PostgreSQL error level if query produced an error. Available with pg_stat_monitor only. |\n| SQL Code | string |  | hidden | PostgreSQL SQLSTATE error code if query produced an error. Available with pg_stat_monitor only. |\n| Error Message | string |  | hidden | Error message if query produced an error. Available with pg_stat_monitor only. |\n| Top Level | string |  | hidden | Whether this is a top-level statement (true) or nested (false). Available with pg_stat_monitor only. |\n| Bucket Start Time | string |  | hidden | Start time of the statistics bucket. Available with pg_stat_monitor only. |\n\n### Running Queries\n\nRetrieves currently executing queries from PostgreSQL [pg_stat_activity](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) system view.\n\nThis function queries `pg_stat_activity` which shows real-time information about each server process including the SQL query being executed, wait events, and session state. Unlike Top Queries which shows aggregated historical statistics, Running Queries shows live snapshots of active queries.\n\nUse cases:\n- Identify long-running queries that may be blocking other operations\n- Debug stuck transactions or hanging connections\n- Monitor active workload during performance issues\n- Investigate wait events and lock contention in real-time\n\nQuery text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Postgres:running-queries` |\n| Require Cloud | yes |\n| Performance | Queries `pg_stat_activity` which is a live system view:<br/>\u2022 Very lightweight query, no impact on database performance<br/>\u2022 Returns only active queries by default (state = 'active')<br/>\u2022 Limited to 500 rows |\n| Security | Query text contains actual SQL being executed, which may include:<br/>\u2022 Personal information in WHERE clauses or INSERT values<br/>\u2022 Business data and internal identifiers<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to PostgreSQL<br/>\u2022 Returns HTTP 503 if collector is still initializing<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Database user permissions\n\nThe monitoring user needs `pg_monitor` role to view all sessions:\n\n```sql\nGRANT pg_monitor TO netdata;\n```\n\nWithout this role, the user can only see their own sessions.\n\n\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Sort By | select | Select the sort column. Defaults to query duration (longest running first). 
| yes | durationMs |  |\n\n#### Returns\n\nLive query data from `pg_stat_activity`. Each row represents a currently active backend process.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Duration | duration | milliseconds |  | Query duration in milliseconds (since query_start). High values indicate long-running queries. |\n| Query | string |  |  | Query text of the currently executing or most recent query. May be truncated at track_activity_query_size. |\n| Database | string |  |  | Name of the database this backend is connected to. |\n| User | string |  |  | Name of the user logged into this backend. |\n| Application Name | string |  |  | Name of the application connected to this backend. |\n| Client Address | string |  |  | IP address of the client (NULL for Unix socket or internal process). |\n| Wait Event | string |  |  | Specific wait event name if backend is currently waiting. |\n| PID | integer |  |  | Process ID of this backend. Use with pg_terminate_backend() to kill a query. |\n| Wait Event Type | string |  | hidden | Type of event the backend is waiting for (Activity, BufferPin, Client, Extension, IO, IPC, Lock, LWLock, Timeout). |\n| State | string |  | hidden | Current state: active, idle, idle in transaction, idle in transaction (aborted), fastpath function call, disabled. |\n| Backend Type | string |  | hidden | Type of backend: client backend, autovacuum worker, parallel worker, walsender, walreceiver, etc. Available in PostgreSQL 10+. |\n| Query Start | timestamp |  | hidden | Time when the currently active query was started. |\n| Transaction Start | timestamp |  | hidden | Time when current transaction started (NULL if no transaction). |\n| Backend Start | timestamp |  | hidden | Time when this process/connection started. |\n| State Change | timestamp |  | hidden | Time when state was last changed. |\n| Query ID | string |  | hidden | Query identifier (requires compute_query_id or extension). Available in PostgreSQL 14+. |\n| Leader PID | integer |  | hidden | Process ID of parallel group leader (NULL if this is leader or not parallel). Available in PostgreSQL 13+. |\n| Database ID | integer |  | hidden | OID of the database this backend is connected to. |\n| User ID | integer |  | hidden | OID of the user logged into this backend. |\n| Client Hostname | string |  | hidden | Hostname of the client via reverse DNS (only if log_hostname enabled). |\n| Client Port | integer |  | hidden | TCP port of client (-1 for Unix socket, NULL for internal process). |\n| Backend Xid | string |  | hidden | Top-level transaction identifier of this backend. |\n| Backend Xmin | string |  | hidden | Backend's xmin horizon. |\n\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PostgreSQL instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.connections_utilization | used | percentage |\n| postgres.connections_usage | available, used | connections |\n| postgres.connections_state_count | active, idle, idle_in_transaction, idle_in_transaction_aborted, disabled | connections |\n| postgres.transactions_duration | a dimension per bucket | transactions/s |\n| postgres.queries_duration | a dimension per bucket | queries/s |\n| postgres.locks_utilization | used | percentage |\n| postgres.checkpoints_rate | scheduled, requested | checkpoints/s |\n| postgres.checkpoints_time | write, sync | milliseconds |\n| postgres.bgwriter_halts_rate | maxwritten | events/s |\n| postgres.buffers_io_rate | checkpoint, backend, bgwriter | B/s |\n| postgres.buffers_backend_fsync_rate | fsync | calls/s |\n| postgres.buffers_allocated_rate | allocated | B/s |\n| postgres.wal_io_rate | write | B/s |\n| postgres.wal_files_count | written, recycled | files |\n| postgres.wal_archiving_files_count | ready, done | files/s |\n| postgres.autovacuum_workers_count | analyze, vacuum_analyze, vacuum, vacuum_freeze, brin_summarize | workers |\n| postgres.txid_exhaustion_towards_autovacuum_perc | emergency_autovacuum | percentage |\n| postgres.txid_exhaustion_perc | txid_exhaustion | percentage |\n| postgres.txid_exhaustion_oldest_txid_num | xid | xid |\n| postgres.catalog_relations_count | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | relations |\n| postgres.catalog_relations_size | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | B |\n| postgres.uptime | uptime | seconds |\n| postgres.databases_count | databases | databases |\n\n### Per repl application\n\nThese metrics refer to the replication application.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| application | application name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.replication_app_wal_lag_size | sent_lag, write_lag, flush_lag, replay_lag | B |\n| postgres.replication_app_wal_lag_time | write_lag, flush_lag, replay_lag | seconds |\n\n### Per repl slot\n\nThese metrics refer to the replication slot.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| slot | replication slot name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.replication_slot_files_count | wal_keep, pg_replslot_files | files |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| database | database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.db_transactions_ratio | committed, rollback | percentage |\n| postgres.db_transactions_rate | committed, rollback | transactions/s |\n| postgres.db_connections_utilization | used | percentage |\n| postgres.db_connections_count | connections | connections |\n| postgres.db_cache_io_ratio | miss | percentage |\n| postgres.db_io_rate | memory, disk | B/s |\n| postgres.db_ops_fetched_rows_ratio | fetched | percentage |\n| postgres.db_ops_read_rows_rate | returned, fetched | 
rows/s |\n| postgres.db_ops_write_rows_rate | inserted, deleted, updated | rows/s |\n| postgres.db_conflicts_rate | conflicts | queries/s |\n| postgres.db_conflicts_reason_rate | tablespace, lock, snapshot, bufferpin, deadlock | queries/s |\n| postgres.db_deadlocks_rate | deadlocks | deadlocks/s |\n| postgres.db_locks_held_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |\n| postgres.db_locks_awaited_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |\n| postgres.db_temp_files_created_rate | created | files/s |\n| postgres.db_temp_files_io_rate | written | B/s |\n| postgres.db_size | size | B |\n\n### Per table\n\nThese metrics refer to the database table.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| database | database name |\n| schema | schema name |\n| table | table name |\n| parent_table | parent table name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.table_rows_dead_ratio | dead | percentage |\n| postgres.table_rows_count | live, dead | rows |\n| postgres.table_ops_rows_rate | inserted, deleted, updated | rows/s |\n| postgres.table_ops_rows_hot_ratio | hot | percentage |\n| postgres.table_ops_rows_hot_rate | hot | rows/s |\n| postgres.table_cache_io_ratio | miss | percentage |\n| postgres.table_io_rate | memory, disk | B/s |\n| postgres.table_index_cache_io_ratio | miss | percentage |\n| postgres.table_index_io_rate | memory, disk | B/s |\n| postgres.table_toast_cache_io_ratio | miss | percentage |\n| postgres.table_toast_io_rate | memory, disk | B/s |\n| postgres.table_toast_index_cache_io_ratio | miss | percentage |\n| postgres.table_toast_index_io_rate | memory, disk | B/s |\n| postgres.table_scans_rate | index, sequential | scans/s |\n| postgres.table_scans_rows_rate | index, sequential | rows/s |\n| postgres.table_autovacuum_since_time | time | seconds |\n| postgres.table_vacuum_since_time | time | seconds |\n| postgres.table_autoanalyze_since_time | time | seconds |\n| postgres.table_analyze_since_time | time | seconds |\n| postgres.table_null_columns | null | columns |\n| postgres.table_size | size | B |\n| postgres.table_bloat_size_perc | bloat | percentage |\n| postgres.table_bloat_size | bloat | B |\n\n### Per index\n\nThese metrics refer to the table index.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| database | database name |\n| schema | schema name |\n| table | table name |\n| parent_table | parent table name |\n| index | index name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.index_size | size | B |\n| postgres.index_bloat_size_perc | bloat | percentage |\n| postgres.index_bloat_size | bloat | B |\n| postgres.index_usage_status | used, unused | status |\n\n",integration_type:"collector",id:"go.d.plugin-postgres-PostgreSQL",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/postgres/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-powerdns",plugin_name:"go.d.plugin",module_name:"powerdns",monitored_instance:{name:"PowerDNS Authoritative Server",link:"https://doc.powerdns.com/authoritative/",icon_filename:"powerdns.svg",categories:["data-collection.networking"]},keywords:["powerdns","dns"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# 
PowerDNS Authoritative Server\n\nPlugin: go.d.plugin\nModule: powerdns\n\n## Overview\n\nThis collector monitors PowerDNS Authoritative Server instances.\nIt collects metrics from [the internal webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver).\n\nUsed endpoints:\n\n- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/authoritative/http-api/statistics.html)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **powerdns** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **powerdns**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/powerdns.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/authoritative/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8081 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **powerdns** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the powerdns data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _powerdns_ (or scroll the list) to locate the **powerdns** collector.\n5. Click the **+** next to the **powerdns** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/powerdns.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns.conf\n```\n\n##### Examples\n\n###### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8081\n\n```\n{% /details %}\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8081\n    username: admin\n    password: password\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8081\n\n  - name: remote\n    url: http://203.0.113.0:8081\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `powerdns` collector, run the `go.d.plugin` with the debug option enabled. 
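\n\nBefore the debug run, it can help to confirm that the PowerDNS API itself responds. This check is a sketch, not part of the official steps; the URL and the X-API-Key value are assumptions to adapt to your setup (they correspond to the `webserver`, `webserver-port`, and `api-key` settings in `pdns.conf`):\n\n```bash\n# Query the statistics endpoint the collector scrapes (adjust URL and key)\ncurl -H 'X-API-Key: your-api-key' http://127.0.0.1:8081/api/v1/servers/localhost/statistics\n```\n\nIf the endpoint responds but the collector still fails, continue with the debug steps below.\n\n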
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m powerdns\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m powerdns -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `powerdns` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep powerdns\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep powerdns /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep powerdns\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
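\n\nFor example, once the job is collecting, the charts listed below can be read back through the Netdata agent API (a sketch, not part of the collector docs; it assumes a local agent on the default port 19999):\n\n```bash\n# Fetch a few recent values for one chart in this scope\ncurl 'http://127.0.0.1:19999/api/v1/data?chart=powerdns.questions_in&points=5'\n```\n\n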
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PowerDNS Authoritative Server instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powerdns.questions_in | udp, tcp | questions/s |\n| powerdns.questions_out | udp, tcp | questions/s |\n| powerdns.cache_usage | query-cache-hit, query-cache-miss, packetcache-hit, packetcache-miss | events/s |\n| powerdns.cache_size | query-cache, packet-cache, key-cache, meta-cache | entries |\n| powerdns.latency | latency | microseconds |\n\n",integration_type:"collector",id:"go.d.plugin-powerdns-PowerDNS_Authoritative_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/powerdns/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-powerdns_recursor",plugin_name:"go.d.plugin",module_name:"powerdns_recursor",monitored_instance:{name:"PowerDNS Recursor",link:"https://doc.powerdns.com/recursor/",icon_filename:"powerdns.svg",categories:["data-collection.networking"]},keywords:["powerdns","dns"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# PowerDNS Recursor\n\nPlugin: go.d.plugin\nModule: powerdns_recursor\n\n## Overview\n\nThis collector monitors PowerDNS Recursor instances.\n\nIt collects metrics from [the internal webserver](https://doc.powerdns.com/recursor/http-api/index.html#built-in-webserver-and-http-api).\n\nUsed endpoints:\n\n- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/recursor/common/api/endpoint-statistics.html)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **powerdns_recursor** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **powerdns_recursor**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/powerdns_recursor.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/recursor/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/recursor/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8081 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **powerdns_recursor** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the powerdns_recursor data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _powerdns_recursor_ (or scroll the list) to locate the **powerdns_recursor** collector.\n5. Click the **+** next to the **powerdns_recursor** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/powerdns_recursor.conf`.\n\nThe file format is YAML. 
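\n\nFor PowerDNS Recursor specifically, the API usually requires a key, which can be passed with the `headers` option as an `X-API-Key` header. A minimal sketch under that assumption (the key value and port are placeholders):\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8081\n    headers:\n      X-API-Key: your-api-key\n```\n\n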
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns_recursor.conf\n```\n\n##### Examples\n\n###### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8081\n\n```\n{% /details %}\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8081\n    username: admin\n    password: password\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8081\n\n  - name: remote\n    url: http://203.0.113.0:8081\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `powerdns_recursor` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m powerdns_recursor\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m powerdns_recursor -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `powerdns_recursor` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep powerdns_recursor\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep powerdns_recursor /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
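\n\nTo narrow that down, you can combine it with `tail` so only the most recent lines are searched (a sketch; adjust the line count as needed):\n\n```bash\ntail -n 1000 /var/log/netdata/collector.log | grep powerdns_recursor\n```\n\n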
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep powerdns_recursor\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PowerDNS Recursor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powerdns_recursor.questions_in | total, tcp, ipv6 | questions/s |\n| powerdns_recursor.questions_out | udp, tcp, ipv6, throttled | questions/s |\n| powerdns_recursor.answer_time | 0-1ms, 1-10ms, 10-100ms, 100-1000ms, slow | queries/s |\n| powerdns_recursor.timeouts | total, ipv4, ipv6 | timeouts/s |\n| powerdns_recursor.drops | over-capacity-drops, query-pipe-full-drops, too-old-drops, truncated-drops, empty-queries | drops/s |\n| powerdns_recursor.cache_usage | cache-hits, cache-misses, packet-cache-hits, packet-cache-misses | events/s |\n| powerdns_recursor.cache_size | cache, packet-cache, negative-cache | entries |\n\n",integration_type:"collector",id:"go.d.plugin-powerdns_recursor-PowerDNS_Recursor",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/powerdns_recursor/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-4d_server",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"4D Server",link:"https://github.com/ThomasMaul/Prometheus_4D_Exporter",icon_filename:"4d_server.png",categories:["data-collection.databases"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# 4D Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor 4D Server performance metrics for efficient application management and optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 
|\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). 
| no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
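\n\nA single job can combine the options documented above, for example filtering time series with `selector` while mapping untyped metrics with `fallback_type`. A sketch only; the URL and the `four_d_*` patterns are illustrative, not taken from the exporter:\n\n```yaml\njobs:\n  - name: 4d_server\n    url: http://127.0.0.1:9090/metrics\n    selector:\n      allow:\n        - four_d_*\n    fallback_type:\n      gauge:\n        - four_d_*\n```\n\n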
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-4D_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-8430ft-modem",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"8430FT modem",link:"https://github.com/dernasherbrezon/8430ft_exporter",icon_filename:"mtc.svg",categories:["data-collection.networking"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# 8430FT modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep track of vital metrics from the MTS 8430FT modem for streamlined network performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to 
connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
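\n\nIf the exporter is served over HTTPS with a private CA, the TLS options from the table above can be set per job. A sketch; all paths and the URL are placeholders:\n\n```yaml\njobs:\n  - name: modem\n    url: https://127.0.0.1:9090/metrics\n    tls_ca: /etc/ssl/private-ca.pem\n    tls_cert: /etc/ssl/client.pem\n    tls_key: /etc/ssl/client.key\n```\n\n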
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-8430FT_modem",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-amd_smi",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"AMD CPU & GPU",link:"https://github.com/amd/amd_smi_exporter",icon_filename:"amd.svg",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# AMD CPU & GPU\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AMD System Management Interface performance for optimized hardware management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are 
[allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
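\n\nWhen the exporter is reachable only through an HTTP proxy, the proxy options from the table above apply per job. A sketch; the proxy address and credentials are placeholders:\n\n```yaml\njobs:\n  - name: amd_smi\n    url: http://127.0.0.1:9090/metrics\n    proxy_url: http://proxy.local:3128\n    proxy_username: username\n    proxy_password: password\n```\n\n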
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-AMD_CPU_&_GPU",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-apicast",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"APIcast",link:"https://github.com/3scale/apicast",icon_filename:"apicast.png",categories:["data-collection.web-servers-and-proxies"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# APIcast\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor APIcast performance metrics to optimize API gateway operations and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [APIcast](https://github.com/3scale/apicast).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [APIcast](https://github.com/3scale/apicast) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
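\n\nBefore the general structure shown next, a focused sketch of `fallback_type` inside a complete job. The metric-name patterns are hypothetical; the syntax is the shell file-name pattern linked above:\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    fallback_type:\n      counter:\n        - '*_events'  # hypothetical: collect untyped *_events series as counters\n      gauge:\n        - 'queue_*'   # hypothetical: collect untyped queue_* series as gauges\n```\n\n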
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-APIcast",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-aws_ec2",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"AWS EC2 Compute instances",link:"https://github.com/O1ahmad/aws_ec2_exporter",icon_filename:"aws-ec2.png",categories:["data-collection.cloud-and-devops"]},keywords:["cloud services","cloud computing","aws services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# AWS EC2 Compute instances\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS EC2 instances key metrics for optimized performance and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances 
running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
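\n\nBefore the general structure shown next, a sketch of how the TLS options from the table above combine for an endpoint behind mutual TLS. All file paths are hypothetical:\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_ca: /etc/ssl/certs/my_ca.pem      # hypothetical CA bundle used to validate the server\n    tls_cert: /etc/ssl/certs/client.pem   # hypothetical client certificate (mTLS)\n    tls_key: /etc/ssl/private/client.key  # hypothetical client private key (mTLS)\n```\n\n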
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-AWS_EC2_Compute_instances",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-aws_quota",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"AWS Quota",link:"https://github.com/emylincon/aws_quota_exporter",icon_filename:"aws.svg",categories:["data-collection.cloud-and-devops"]},keywords:["cloud services","cloud computing","aws services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# AWS Quota\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS service quotas for effective resource usage and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the 
local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
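\n\nBefore the general structure shown next, a sketch of a job that reaches its target through an authenticating proxy and sends one extra header. The proxy endpoint, credentials, and header are hypothetical; the `headers` mapping follows the key: value form noted in the table:\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    proxy_url: http://proxy.example.com:3128  # hypothetical proxy endpoint\n    proxy_username: proxyuser                 # hypothetical credentials\n    proxy_password: proxypass\n    headers:\n      X-Scope-OrgID: my-team                  # hypothetical extra header\n```\n\n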
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-AWS_Quota",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-aws_rds",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"AWS RDS",link:"https://github.com/percona/rds_exporter",icon_filename:"aws-rds.svg",categories:["data-collection.databases"]},keywords:["cloud services","cloud computing","aws services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# AWS RDS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Amazon RDS (Relational Database Service) metrics for efficient cloud database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [rds_exporter](https://github.com/percona/rds_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host 
by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [rds_exporter](https://github.com/percona/rds_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
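\n\nBefore the general structure shown next, a sketch that combines bearer-token authentication with raised series limits, which can help when an exporter is both token-protected and high-cardinality. The token path is hypothetical and the limit values are arbitrary examples:\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    bearer_token_file: /var/run/secrets/exporter-token  # hypothetical token file\n    max_time_series: 5000                               # example value, raises the global limit\n    max_time_series_per_metric: 500                     # example value, raises the per-metric limit\n```\n\n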
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-AWS_RDS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-alamos_fe2",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Alamos FE2 server",link:"https://github.com/codemonauts/prometheus-fe2-exporter",icon_filename:"alamos_fe2.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Alamos FE2 server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Alamos FE2 systems for improved performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to 
known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Alamos_FE2_server",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-authlog",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"AuthLog",link:"https://github.com/woblerr/authlog_exporter",icon_filename:"linux.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# AuthLog\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor authentication logs for security insights and efficient access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AuthLog Exporter](https://github.com/woblerr/authlog_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AuthLog Exporter](https://github.com/woblerr/authlog_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-AuthLog",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-bosh",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"BOSH",link:"https://github.com/bosh-prometheus/bosh_exporter",icon_filename:"bosh.png",categories:["data-collection.cloud-and-devops"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# BOSH\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on BOSH deployment metrics for improved cloud orchestration and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are 
[allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-BOSH",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-bird",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Bird Routing Daemon",link:"https://github.com/czerwonk/bird_exporter",icon_filename:"bird.png",categories:["data-collection.networking"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Bird Routing Daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Bird Routing Daemon metrics for optimized network routing and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that 
are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Bird_Routing_Daemon",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-blackbox",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Blackbox",link:"https://github.com/prometheus/blackbox_exporter",icon_filename:"prometheus.svg",categories:["data-collection.synthetic-testing"]},keywords:["blackbox"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Blackbox\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack external service availability and response times with Blackbox monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Blackbox exporter](https://github.com/prometheus/blackbox_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to 
known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Blackbox exporter](https://github.com/prometheus/blackbox_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Blackbox",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-bungeecord",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"BungeeCord",link:"https://github.com/weihao/bungeecord-prometheus-exporter",icon_filename:"bungee.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# BungeeCord\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack BungeeCord proxy server metrics for efficient load balancing and performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by 
trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
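As a sketch of the `fallback_type` option described above (the port and metric name patterns here are hypothetical, not taken from the exporter), untyped series can be re-typed instead of dropped:\n\n```yaml\njobs:\n  - name: bungeecord                      # placeholder job name\n    url: http://127.0.0.1:9225/metrics    # replace with your exporter's address\n    fallback_type:\n      counter:\n        - '*_events'                      # shell-style pattern: treat these as counters\n      gauge:\n        - 'bungeecord_*'                  # hypothetical pattern: treat these as gauges\n```\n\n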
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-BungeeCord",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-celery",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Celery",link:"https://github.com/ZeitOnline/celery_redis_prometheus",icon_filename:"celery.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Celery\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Celery task queue metrics for optimized task processing and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports 
that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
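For an endpoint that requires authentication, the `bearer_token_file` and TLS options from the table above can be combined in one job; the address and paths below are placeholders:\n\n```yaml\njobs:\n  - name: celery                                    # placeholder job name\n    url: https://127.0.0.1:9808/metrics             # replace host/port as needed\n    bearer_token_file: /etc/netdata/secrets/token   # placeholder token file path\n    tls_ca: /etc/ssl/certs/my-ca.pem                # placeholder CA bundle path\n```\n\n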
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Celery",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-chia",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Chia",link:"https://github.com/chia-network/chia-exporter",icon_filename:"chia.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Chia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Chia blockchain metrics for optimized farming and resource allocation.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Chia Exporter](https://github.com/chia-network/chia-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Chia Exporter](https://github.com/chia-network/chia-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
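If an endpoint exposes a very large number of series, the `max_time_series` and `max_time_series_per_metric` limits from the table above can be tuned; the values below are illustrative, not recommendations:\n\n```yaml\njobs:\n  - name: chia                            # placeholder job name\n    url: http://127.0.0.1:9914/metrics    # replace with your exporter's address\n    max_time_series: 5000                 # skip the endpoint if it returns more series than this\n    max_time_series_per_metric: 500       # skip metrics with more series than this\n```\n\n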
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Chia",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-clm5ip",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Christ Elektronik CLM5IP power panel",link:"https://github.com/christmann/clm5ip_exporter/",icon_filename:"christelec.png",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Christ Elektronik CLM5IP power panel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Christ Elektronik CLM5IP device metrics for efficient performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects 
instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
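If the device is only reachable through an HTTP proxy, the proxy options from the table above apply; the device address, proxy URL, and credentials below are placeholders:\n\n```yaml\njobs:\n  - name: clm5ip                               # placeholder job name\n    url: http://192.0.2.10/metrics             # placeholder device address\n    proxy_url: http://proxy.example.com:3128   # placeholder proxy URL\n    proxy_username: proxyuser                  # placeholder credentials\n    proxy_password: proxypass\n```\n\n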
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Christ_Elektronik_CLM5IP_power_panel",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-cilium_agent",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Cilium Agent",link:"https://github.com/cilium/cilium",icon_filename:"cilium.png",categories:["data-collection.containers-and-vms"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Cilium Agent\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Cilium Agent metrics for optimized network security and connectivity.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Agent](https://github.com/cilium/cilium).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that 
are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Agent](https://github.com/cilium/cilium) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option lets you filter out unwanted time series: only metrics matching the selector are collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\n
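All remaining options from the table above are plain job-level keys as well. A minimal sketch combining the bearer token, custom header, and proxy options (all values below are illustrative, not defaults):\n\n```yaml\njobs:\n  - name: secured\n    url: http://127.0.0.1:9090/metrics\n    # path to a file holding the bearer token (illustrative)\n    bearer_token_file: /run/secrets/prometheus_token\n    # extra headers, one per line as key: value (illustrative)\n    headers:\n      X-Scope-OrgID: infra\n    # route the request through an HTTP proxy (illustrative)\n    proxy_url: http://127.0.0.1:3128\n```\n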
You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf Netdata runs in a Docker container named \"netdata\" (replace the name if yours differs), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Cilium_Agent",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-cilium_operator",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Cilium Operator",link:"https://github.com/cilium/cilium",icon_filename:"cilium.png",categories:["data-collection.containers-and-vms"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Cilium Operator\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cilium Operator metrics for efficient Kubernetes network security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Operator](https://github.com/cilium/cilium).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are 
[allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Operator](https://github.com/cilium/cilium) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option lets you filter out unwanted time series: only metrics matching the selector are collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\n
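The limits and customization options above are also set per job. A hedged sketch with illustrative values only:\n\n```yaml\njobs:\n  - name: tuned\n    url: http://127.0.0.1:9090/metrics\n    # raise the global and per-metric time series limits (illustrative values)\n    max_time_series: 5000\n    max_time_series_per_metric: 500\n    # prefix chart labels as cilium_<label> (illustrative)\n    label_prefix: cilium\n```\n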
You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf Netdata runs in a Docker container named \"netdata\" (replace the name if yours differs), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Cilium_Operator",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-cilium_proxy",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Cilium Proxy",link:"https://github.com/cilium/proxy",icon_filename:"cilium.png",categories:["data-collection.containers-and-vms"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Cilium Proxy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Cilium Proxy metrics for enhanced network security and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Proxy](https://github.com/cilium/proxy).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Proxy](https://github.com/cilium/proxy) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option lets you filter out unwanted time series: only metrics matching the selector are collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\n
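The `selector` option described above also accepts concrete patterns. A hedged sketch that keeps only series with a hypothetical `envoy_` name prefix:\n\n```yaml\njobs:\n  - name: filtered\n    url: http://127.0.0.1:9090/metrics\n    selector:\n      allow:\n        # pattern shown for illustration only\n        - envoy_*\n```\n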
You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf Netdata runs in a Docker container named \"netdata\" (replace the name if yours differs), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Cilium_Proxy",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-clamd",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"ClamAV daemon",link:"https://github.com/sergeymakinen/clamav_exporter",icon_filename:"clamav.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# ClamAV daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack ClamAV antivirus metrics for enhanced threat detection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports 
that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option lets you filter out unwanted time series: only metrics matching the selector are collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\n
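The `fallback_type` option described above likewise takes concrete name patterns. A hedged sketch, assuming a hypothetical untyped metric whose name matches `clamav_*_info`:\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    fallback_type:\n      gauge:\n        # hypothetical untyped metric processed as Gauge\n        - clamav_*_info\n```\n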
You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf Netdata runs in a Docker container named \"netdata\" (replace the name if yours differs), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-ClamAV_daemon",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-clamscan",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Clamscan results",link:"https://github.com/FortnoxAB/clamscan-exporter",icon_filename:"clamav.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Clamscan results\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ClamAV scanning performance metrics for efficient malware detection and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known 
ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option lets you filter out unwanted time series: only metrics matching the selector are collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\n
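The `vnode` option described above ties a job to a Virtual Node. A hedged sketch (the node name is hypothetical and must match a configured Virtual Node):\n\n```yaml\njobs:\n  - name: remote_scanner\n    url: http://192.0.2.1:9090/metrics\n    # hypothetical Virtual Node name defined in your vnodes configuration\n    vnode: scanner-vm-01\n```\n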
You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Clamscan_results",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-clash",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Clash",link:"https://github.com/elonzh/clash_exporter",icon_filename:"clash.png",categories:["data-collection.networking"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Clash\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Clash proxy server metrics for optimized network performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Clash exporter](https://github.com/elonzh/clash_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Clash exporter](https://github.com/elonzh/clash_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has the 'quantile' label.\n- As Histogram if it has the 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Clash",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-aws_cloudwatch",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"CloudWatch",link:"https://github.com/prometheus/cloudwatch_exporter",icon_filename:"aws-cloudwatch.png",categories:["data-collection.cloud-and-devops"]},keywords:["cloud services","cloud computing","scalability"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# CloudWatch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS CloudWatch metrics for comprehensive AWS resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it 
detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method | Best for | How to |\n|--------|----------|--------|\n| [**UI**](#via-ui) | Fast setup without editing files | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job. |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has the 'quantile' label.\n- As Histogram if it has the 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-CloudWatch",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-cloud_foundry",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Cloud Foundry",link:"https://github.com/bosh-prometheus/cf_exporter",icon_filename:"cloud-foundry.svg",categories:["data-collection.cloud-and-devops"]},keywords:["cloud services","cloud computing","scalability"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Cloud Foundry\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Cloud Foundry platform metrics for optimized application deployment and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects 
instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method | Best for | How to |\n|--------|----------|--------|\n| [**UI**](#via-ui) | Fast setup without editing files | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job. |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has the 'quantile' label.\n- As Histogram if it has the 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Cloud_Foundry",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-cloud_foundry_firebase",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Cloud Foundry Firehose",link:"https://github.com/bosh-prometheus/firehose_exporter",icon_filename:"cloud-foundry.svg",categories:["data-collection.cloud-and-devops"]},keywords:["cloud services","cloud computing","scalability"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Cloud Foundry Firehose\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cloud Foundry Firehose metrics for comprehensive platform diagnostics and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default 
Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method | Best for | How to |\n|--------|----------|--------|\n| [**UI**](#via-ui) | Fast setup without editing files | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job. |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. 
|  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has the 'quantile' label.\n- As Histogram if it has the 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Cloud_Foundry_Firehose",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-cmon",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"ClusterControl CMON",link:"https://github.com/severalnines/cmon_exporter",icon_filename:"cluster-control.svg",categories:["data-collection.databases"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# ClusterControl CMON\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack CMON metrics from Severalnines Cluster Control for efficient monitoring and management of database operations.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CMON Exporter](https://github.com/severalnines/cmon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the 
local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method | Best for | How to |\n|--------|----------|--------|\n| [**UI**](#via-ui) | Fast setup without editing files | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job. |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CMON Exporter](https://github.com/severalnines/cmon_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
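As noted above, `update_every` and `autodetection_retry` can be defined globally; a global value acts as a default that each job inherits and may override. A minimal sketch of this behavior (the job names, ports, and interval values below are illustrative placeholders):\n\n```yaml\nupdate_every: 5  # global default: collect every 5 seconds\njobs:\n  - name: app1   # inherits the global update_every (5)\n    url: http://127.0.0.1:9090/metrics\n  - name: app2\n    url: http://127.0.0.1:9091/metrics\n    update_every: 1  # per-job override of the global default\n```\n\n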
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-ClusterControl_CMON",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-collectd",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Collectd",link:"https://github.com/prometheus/collectd_exporter",icon_filename:"collectd.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Collectd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor system and application metrics with Collectd for comprehensive performance analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Collectd exporter](https://github.com/prometheus/collectd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known 
ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Collectd exporter](https://github.com/prometheus/collectd_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
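As noted above, `update_every` and `autodetection_retry` can be defined globally; a global value acts as a default that each job inherits and may override. A minimal sketch of this behavior (the job names, ports, and interval values below are illustrative placeholders):\n\n```yaml\nupdate_every: 5  # global default: collect every 5 seconds\njobs:\n  - name: app1   # inherits the global update_every (5)\n    url: http://127.0.0.1:9090/metrics\n  - name: app2\n    url: http://127.0.0.1:9091/metrics\n    update_every: 1  # per-job override of the global default\n```\n\n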
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Collectd",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-concourse",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Concourse",link:"https://concourse-ci.org",icon_filename:"concourse.png",categories:["data-collection.cloud-and-devops"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Concourse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Concourse CI/CD pipeline metrics for optimized workflow management and deployment.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Concourse built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://concourse-ci.org/metrics.html#configuring-metrics).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
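As noted above, `update_every` and `autodetection_retry` can be defined globally; a global value acts as a default that each job inherits and may override. A minimal sketch of this behavior (the job names, ports, and interval values below are illustrative placeholders):\n\n```yaml\nupdate_every: 5  # global default: collect every 5 seconds\njobs:\n  - name: app1   # inherits the global update_every (5)\n    url: http://127.0.0.1:9090/metrics\n  - name: app2\n    url: http://127.0.0.1:9091/metrics\n    update_every: 1  # per-job override of the global default\n```\n\n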
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Concourse",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ftbeerpi",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"CraftBeerPi",link:"https://github.com/jo-hannes/craftbeerpi_exporter",icon_filename:"craftbeer.png",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# CraftBeerPi\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on CraftBeerPi homebrewing metrics for optimized brewing process management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to 
known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
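As noted above, `update_every` and `autodetection_retry` can be defined globally; a global value acts as a default that each job inherits and may override. A minimal sketch of this behavior (the job names, ports, and interval values below are illustrative placeholders):\n\n```yaml\nupdate_every: 5  # global default: collect every 5 seconds\njobs:\n  - name: app1   # inherits the global update_every (5)\n    url: http://127.0.0.1:9090/metrics\n  - name: app2\n    url: http://127.0.0.1:9091/metrics\n    update_every: 1  # per-job override of the global default\n```\n\n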
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-CraftBeerPi",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-crowdsec",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Crowdsec",link:"https://docs.crowdsec.net/docs/observability/prometheus",icon_filename:"crowdsec.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Crowdsec\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Crowdsec security metrics for efficient threat detection and response.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Crowdsec built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://docs.crowdsec.net/docs/observability/prometheus/).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
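As noted above, `update_every` and `autodetection_retry` can be defined globally; a global value acts as a default that each job inherits and may override. A minimal sketch of this behavior (the job names, ports, and interval values below are illustrative placeholders):\n\n```yaml\nupdate_every: 5  # global default: collect every 5 seconds\njobs:\n  - name: app1   # inherits the global update_every (5)\n    url: http://127.0.0.1:9090/metrics\n  - name: app2\n    url: http://127.0.0.1:9091/metrics\n    update_every: 1  # per-job override of the global default\n```\n\n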
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Crowdsec",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-cryptowatch",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Cryptowatch",link:"https://github.com/nbarrientos/cryptowat_exporter",icon_filename:"cryptowatch.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Cryptowatch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cryptowatch market data metrics for comprehensive cryptocurrency market analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to 
known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set to 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
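\n\nFor instance, a job that authenticates with a bearer token (the bearer_token_file option from the table above) might look like the following sketch. The port is the usual documentation placeholder and the token path is hypothetical; adjust both to your deployment.\n\n```yaml\njobs:\n  - name: cryptowatch\n    # placeholder address; use the address the exporter actually listens on\n    url: http://127.0.0.1:9090/metrics\n    # hypothetical path; the file must be readable by the netdata user\n    bearer_token_file: /etc/netdata/secrets/exporter_token\n```\n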
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Cryptowatch",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-dmarc",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"DMARC",link:"https://github.com/jgosmann/dmarc-metrics-exporter",icon_filename:"dmarc.png",categories:["data-collection.applications"]},keywords:["email authentication","policy","reporting"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# DMARC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack DMARC email authentication metrics for improved email security and deliverability.\n\n\nMetrics are gathered by periodically sending HTTP requests to [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by 
trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set to 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
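\n\nTo make the fallback_type option above concrete, the sketch below maps hypothetical untyped series to explicit types; the metric name patterns are invented for the example and the port is a placeholder.\n\n```yaml\njobs:\n  - name: dmarc\n    url: http://127.0.0.1:9090/metrics\n    fallback_type:\n      counter:\n        # hypothetical pattern: monotonically increasing untyped series\n        - "*_events"\n      gauge:\n        # hypothetical pattern: untyped series holding a current value\n        - "*_queue_size"\n```\n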
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-DMARC",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-dnsbl",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"DNSBL",link:"https://github.com/Luzilla/dnsbl_exporter/",icon_filename:"dnsbl.png",categories:["data-collection.networking"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# DNSBL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor DNSBL metrics for efficient domain reputation and security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set to 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
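\n\nAs an illustration of the global options and limits described above, the sketch below sets update_every globally and raises the series budget for one job. The numbers are illustrative, not recommendations, and the port is a placeholder.\n\n```yaml\n# applies to every job unless a job overrides it\nupdate_every: 5\n\njobs:\n  - name: dnsbl\n    url: http://127.0.0.1:9090/metrics\n    # allow endpoints that legitimately expose many series\n    max_time_series: 5000\n    max_time_series_per_metric: 500\n```\n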
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-DNSBL",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-discourse",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Discourse",link:"https://github.com/discourse/discourse-prometheus",icon_filename:"discourse.svg",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Discourse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Discourse forum metrics for efficient community management and engagement.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Discourse Exporter](https://github.com/discourse/discourse-prometheus).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are 
[allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Discourse Exporter](https://github.com/discourse/discourse-prometheus) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set to 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
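\n\nFor example, reaching the exporter through an HTTP proxy while adding a custom header (both documented in the table above) might look like the following sketch; the proxy address, credentials, and header are placeholders.\n\n```yaml\njobs:\n  - name: discourse\n    url: http://127.0.0.1:9090/metrics\n    proxy_url: http://proxy.example.com:3128\n    proxy_username: netdata\n    proxy_password: secret\n    headers:\n      # headers are given as key: value pairs, one per line\n      X-Scrape-Source: netdata\n```\n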
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Discourse",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-dutch_electricity_smart_meter",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Dutch Electricity Smart Meter",link:"https://github.com/TobiasDeBruijn/prometheus-p1-exporter",icon_filename:"dutch-electricity.png",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Dutch Electricity Smart Meter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Dutch smart meter P1 port metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default 
Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. 
|  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
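For illustration, a single job can combine several of the options described above. The following is a minimal sketch; the job name, URL, and metric name patterns are hypothetical placeholders, not values taken from this exporter:\n\n```yaml\njobs:\n  - name: p1_local\n    url: http://127.0.0.1:9090/metrics\n    # keep only time series whose names match p1_* (hypothetical pattern)\n    selector:\n      allow:\n        - p1_*\n    # treat untyped p1_power_* metrics as gauges (hypothetical pattern)\n    fallback_type:\n      gauge:\n        - p1_power_*\n```\n\n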
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Dutch_Electricity_Smart_Meter",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-dynatrace",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Dynatrace",link:"https://github.com/Apside-TOP/dynatrace_exporter",icon_filename:"dynatrace.svg",categories:["data-collection.cloud-and-devops"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Dynatrace\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dynatrace APM metrics for comprehensive application performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect 
to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
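For illustration, a single job can combine several of the options described above. The following is a minimal sketch; the job name, URL, and metric name patterns are hypothetical placeholders, not values taken from this exporter:\n\n```yaml\njobs:\n  - name: dynatrace_local\n    url: http://127.0.0.1:9090/metrics\n    # keep only time series whose names match dynatrace_* (hypothetical pattern)\n    selector:\n      allow:\n        - dynatrace_*\n    # treat untyped dynatrace_requests_* metrics as counters (hypothetical pattern)\n    fallback_type:\n      counter:\n        - dynatrace_requests_*\n```\n\n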
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Dynatrace",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-eos_web",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"EOS",link:"https://eos-web.web.cern.ch/eos-web/",icon_filename:"eos.png",categories:["data-collection.storage"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# EOS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor CERN EOS metrics for efficient storage management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [EOS exporter](https://github.com/cern-eos/eos_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [EOS exporter](https://github.com/cern-eos/eos_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
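For illustration, a single job can combine several of the options described above. The following is a minimal sketch; the job name, URL, and metric name patterns are hypothetical placeholders, not values taken from this exporter:\n\n```yaml\njobs:\n  - name: eos_local\n    url: http://127.0.0.1:9090/metrics\n    # keep only time series whose names match eos_* (hypothetical pattern)\n    selector:\n      allow:\n        - eos_*\n    # treat untyped eos_space_* metrics as gauges (hypothetical pattern)\n    fallback_type:\n      gauge:\n        - eos_space_*\n```\n\n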
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-EOS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-elgato_keylight",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Elgato Key Light devices.",link:"https://github.com/mdlayher/keylight_exporter",icon_filename:"elgato.svg",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Elgato Key Light devices.\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Elgato Key Light metrics for optimized lighting control and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by 
trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
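For illustration, a single job can combine several of the options described above. The following is a minimal sketch; the job name, URL, and metric name patterns are hypothetical placeholders, not values taken from this exporter:\n\n```yaml\njobs:\n  - name: keylight_local\n    url: http://127.0.0.1:9090/metrics\n    # keep only time series whose names match keylight_* (hypothetical pattern)\n    selector:\n      allow:\n        - keylight_*\n    # treat untyped keylight_brightness_* metrics as gauges (hypothetical pattern)\n    fallback_type:\n      gauge:\n        - keylight_brightness_*\n```\n\n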
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Elgato_Key_Light_devices.",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-energomera",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Energomera smart power meters",link:"https://github.com/peak-load/energomera_exporter",icon_filename:"energomera.png",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Energomera smart power meters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Energomera electricity meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Energomera electricity meter exporter](https://github.com/peak-load/energomera_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, 
it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [energomera-exporter (Energomera electricity meter exporter)](https://github.com/peak-load/energomera_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. 
|  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
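For illustration, a single job can combine several of the options described above. The following is a minimal sketch; the job name, URL, and metric name patterns are hypothetical placeholders, not values taken from this exporter:\n\n```yaml\njobs:\n  - name: energomera_local\n    url: http://127.0.0.1:9090/metrics\n    # keep only time series whose names match energomera_* (hypothetical pattern)\n    selector:\n      allow:\n        - energomera_*\n    # treat untyped energomera_energy_* metrics as counters (hypothetical pattern)\n    fallback_type:\n      counter:\n        - energomera_energy_*\n```\n\n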
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Energomera_smart_power_meters",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-frrouting",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"FRRouting",link:"https://github.com/tynany/frr_exporter",icon_filename:"frrouting.png",categories:["data-collection.networking"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# FRRouting\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Free Range Routing (FRR) metrics for optimized network routing and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FRRouting Exporter](https://github.com/tynany/frr_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that 
are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FRRouting Exporter](https://github.com/tynany/frr_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-FRRouting",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-fastd",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Fastd",link:"https://github.com/freifunk-darmstadt/fastd-exporter",icon_filename:"fastd.png",categories:["data-collection.networking"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Fastd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Fastd VPN metrics for efficient virtual private network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are 
[allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Fastd",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-freebsd_nfs",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"FreeBSD NFS",link:"https://github.com/Axcient/freebsd-nfs-exporter",icon_filename:"freebsd.svg",categories:["data-collection.storage"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# FreeBSD NFS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor FreeBSD Network File System metrics for efficient file sharing management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known 
ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-FreeBSD_NFS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-freebsd_rctl",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"FreeBSD RCTL-RACCT",link:"https://github.com/yo000/rctl_exporter",icon_filename:"freebsd.svg",categories:["data-collection.operating-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# FreeBSD RCTL-RACCT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on FreeBSD Resource Container metrics for optimized resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to 
connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-FreeBSD_RCTL-RACCT",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-freifunk",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Freifunk network",link:"https://github.com/xperimental/freifunk-exporter",icon_filename:"freifunk.png",categories:["data-collection.networking"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Freifunk network\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Freifunk community network metrics for optimized network performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to 
connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Freifunk_network",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-gcp_gce",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"GCP GCE",link:"https://github.com/O1ahmad/gcp-gce-exporter",icon_filename:"gcp-gce.svg",categories:["data-collection.cloud-and-devops"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# GCP GCE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Google Cloud Platform Compute Engine metrics for efficient cloud resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect 
to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-GCP_GCE",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-enclosure",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Generic storage enclosure tool",link:"https://github.com/Gandi/jbod-rs",icon_filename:"storage-enclosure.svg",categories:["data-collection.storage"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Generic storage enclosure tool\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor storage enclosure metrics for efficient storage device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by 
trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Generic_storage_enclosure_tool",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-github_ratelimit",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"GitHub API rate limit",link:"https://github.com/lunarway/github-ratelimit-exporter",icon_filename:"github.svg",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# GitHub API rate limit\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor GitHub API rate limit metrics for efficient\nAPI usage and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances 
running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-GitHub_API_rate_limit",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-github_repo",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"GitHub repository",link:"https://github.com/githubexporter/github-exporter",icon_filename:"github.svg",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# GitHub repository\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack GitHub repository metrics for optimized project and user analytics monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GitHub Exporter](https://github.com/githubexporter/github-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to 
connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub Exporter](https://github.com/githubexporter/github-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata instance runs in a Docker container named \"netdata\" (replace the name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-GitHub_repository",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-gitlab_runner",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"GitLab Runner",link:"https://gitlab.com/gitlab-org/gitlab-runner",icon_filename:"gitlab.png",categories:["data-collection.cloud-and-devops"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# GitLab Runner\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on GitLab CI/CD job metrics for efficient development and deployment management.\n\n\nMetrics are gathered by periodically sending HTTP requests to GitLab Runner's built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are 
[allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata instance runs in a Docker container named \"netdata\" (replace the name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-GitLab_Runner",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-gobetween",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Gobetween",link:"https://github.com/yyyar/gobetween",icon_filename:"gobetween.svg",categories:["data-collection.web-servers-and-proxies"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Gobetween\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Gobetween load balancer metrics for optimized network traffic management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to Gobetween's built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. 
|  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Gobetween",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-gcp",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Google Cloud Platform",link:"https://github.com/DazWilkin/gcp-exporter",icon_filename:"gcp.png",categories:["data-collection.cloud-and-devops"]},keywords:["cloud services","cloud computing","scalability"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Google Cloud Platform\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google Cloud Platform metrics for comprehensive cloud resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, 
it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Google_Cloud_Platform",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-google_pagespeed",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Google Pagespeed",link:"https://github.com/foomo/pagespeed_exporter",icon_filename:"google.svg",categories:["data-collection.applications"]},keywords:["cloud services","cloud computing","google cloud services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Google Pagespeed\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Google PageSpeed Insights performance metrics for efficient web page optimization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### 
Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. 
|  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Google_Pagespeed",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-gcp_stackdriver",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Google Stackdriver",link:"https://github.com/prometheus-community/stackdriver_exporter",icon_filename:"gcp-stackdriver.svg",categories:["data-collection.cloud-and-devops"]},keywords:["cloud services","cloud computing","google cloud services"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Google Stackdriver\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Google Stackdriver monitoring metrics for optimized cloud performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### 
Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. 
|  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
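A single job can combine several of the options documented above. As a sketch, scraping the exporter over HTTPS with a bearer token and a private CA might look like this (the port, token path, and CA path are illustrative assumptions, not shipped defaults):\n\n```yaml\njobs:\n  - name: stackdriver\n    url: https://127.0.0.1:9255/metrics # assumed stackdriver_exporter address\n    bearer_token_file: /etc/netdata/stackdriver.token # assumed token location\n    tls_ca: /etc/ssl/certs/internal-ca.pem # CA bundle used to validate the server certificate\n    timeout: 5\n```\n\n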
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Google_Stackdriver",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-grafana",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Grafana",link:"https://grafana.com/",icon_filename:"grafana.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Grafana\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Grafana dashboard and visualization metrics for optimized monitoring and data analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to Grafana built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. 
|  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
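Combining the request and customization options above, a job that sends an extra header and prefixes chart labels might look like this sketch (the header name and prefix are made up for illustration; Grafana's built-in exporter usually serves metrics on port 3000):\n\n```yaml\njobs:\n  - name: grafana\n    url: http://127.0.0.1:3000/metrics\n    label_prefix: grafana # labels become grafana_<label name>\n    headers:\n      X-Scrape-Source: netdata # one header per line, as key: value\n```\n\n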
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Grafana",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-graylog",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Graylog Server",link:"https://github.com/Graylog2/graylog2-server/",icon_filename:"graylog.svg",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Graylog Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Graylog server metrics for efficient log management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to Graylog built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://go2docs.graylog.org/5-0/interacting_with_your_log_data/metrics.html#PrometheusMetricExporting).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
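When the Graylog endpoint is reachable only through an HTTP proxy, the proxy options above can be set per job. A sketch with placeholder proxy details (adjust the URL to wherever `prometheus_exporter_bind_address` points on your Graylog server; host and credentials here are invented):\n\n```yaml\njobs:\n  - name: graylog\n    url: http://127.0.0.1:9833/metrics # placeholder exporter address\n    proxy_url: http://proxy.example.com:3128 # placeholder proxy\n    proxy_username: scraper\n    proxy_password: changeme\n```\n\n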
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Graylog_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-hana",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"HANA",link:"https://github.com/jenningsloy318/hana_exporter",icon_filename:"sap.svg",categories:["data-collection.databases"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# HANA\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SAP HANA database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HANA Exporter](https://github.com/jenningsloy318/hana_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HANA Exporter](https://github.com/jenningsloy318/hana_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
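The `max_time_series` and `max_time_series_per_metric` limits above can be tuned per job for high-cardinality exporters. A sketch with explicit limits (the port is a placeholder for wherever `hana_exporter` listens):\n\n```yaml\njobs:\n  - name: hana\n    url: http://127.0.0.1:9460/metrics # placeholder port\n    max_time_series: 5000 # above this, the whole response is not processed\n    max_time_series_per_metric: 500 # above this, the individual metric is skipped\n```\n\n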
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-HANA",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-halon",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Halon",link:"https://github.com/tobiasbp/halon_exporter",icon_filename:"halon.svg",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Halon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Halon email security and delivery metrics for optimized email management and protection.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Halon exporter](https://github.com/tobiasbp/halon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Halon exporter](https://github.com/tobiasbp/halon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
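To attribute a remote Halon instance's metrics to its own node on the dashboard, a job can reference a Virtual Node through the `vnode` option. A sketch, assuming a Virtual Node named `halon-mta-01` has already been defined in the Agent's Virtual Node configuration (the name, address, and port are illustrative):\n\n```yaml\njobs:\n  - name: halon\n    url: http://192.0.2.10:9777/metrics # placeholder address and port\n    not_follow_redirects: yes # do not follow HTTP redirects\n    vnode: halon-mta-01 # must match a configured Virtual Node\n```\n\n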
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Halon",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-hitron_coda",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Hitron CODA Cable Modem",link:"https://github.com/hairyhenderson/hitron_coda_exporter",icon_filename:"hitron.svg",categories:["data-collection.networking"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Hitron CODA Cable Modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Hitron CODA cable modem metrics for optimized internet connectivity and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the 
local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Hitron_CODA_Cable_Modem",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-homebridge",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Homebridge",link:"https://github.com/lstrojny/homebridge-prometheus-exporter",icon_filename:"homebridge.svg",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Homebridge\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Homebridge smart home metrics for efficient home automation management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running 
on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Homebridge",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-homey",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Homey",link:"https://github.com/rickardp/homey-prometheus-exporter",icon_filename:"homey.svg",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Homey\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Homey smart home controller metrics for efficient home automation and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are 
[allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Homey",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-honeypot",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Honeypot",link:"https://github.com/Intrinsec/honeypot_exporter",icon_filename:"intrinsec.svg",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Honeypot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor honeypot metrics for efficient threat detection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Honeypot",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-hubble",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Hubble",link:"https://github.com/cilium/hubble",icon_filename:"hubble.png",categories:["data-collection.cloud-and-devops"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Hubble\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hubble network observability metrics for efficient network visibility and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to Hubble built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line, as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration that reads metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace with the actual name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Hubble",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ibm_aix_njmon",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"IBM AIX systems Njmon",link:"https://github.com/crooks/njmon_exporter",icon_filename:"ibm.svg",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# IBM AIX systems Njmon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on NJmon system performance monitoring metrics for efficient IT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NJmon](https://github.com/crooks/njmon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to 
connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NJmon](https://github.com/crooks/njmon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-IBM_AIX_systems_Njmon",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ibm_cex",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"IBM CryptoExpress (CEX) cards",link:"https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin",icon_filename:"ibm.svg",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# IBM CryptoExpress (CEX) cards\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack IBM Z Crypto Express device metrics for optimized cryptographic performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy 
default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. 
|  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-IBM_CryptoExpress_(CEX)_cards",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ibm_mq",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"IBM MQ",link:"https://github.com/agebhar1/mq_exporter",icon_filename:"ibm.svg",categories:["data-collection.databases"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# IBM MQ\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on IBM MQ message queue metrics for efficient message transport and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MQ Exporter](https://github.com/agebhar1/mq_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQ Exporter](https://github.com/agebhar1/mq_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-IBM_MQ",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ibm_spectrum",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"IBM Spectrum",link:"https://github.com/topine/ibm-spectrum-exporter",icon_filename:"ibm.svg",categories:["data-collection.storage"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# IBM Spectrum\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Spectrum storage metrics for efficient data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are 
[allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-IBM_Spectrum",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ibm_spectrum_virtualize",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"IBM Spectrum Virtualize",link:"https://github.com/bluecmd/spectrum_virtualize_exporter",icon_filename:"ibm.svg",categories:["data-collection.storage"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# IBM Spectrum Virtualize\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Spectrum Virtualize metrics for efficient storage virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances 
running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
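\n\nBeyond the minimal skeleton shown below, any option from the table above can be set per job. For example, to authenticate using a bearer token read from a file (a sketch; the token path is a placeholder):\n\n```yaml\njobs:\n  - name: example\n    url: http://127.0.0.1:9090/metrics\n    bearer_token_file: /etc/netdata/secrets/token\n\n```\n\n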
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-IBM_Spectrum_Virtualize",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ibm_zhmc",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"IBM Z Hardware Management Console",link:"https://github.com/zhmcclient/zhmc-prometheus-exporter",icon_filename:"ibm.svg",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# IBM Z Hardware Management Console\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Z Hardware Management Console metrics for efficient mainframe management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy 
default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
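\n\nBeyond the minimal skeleton shown below, a job accepts any option from the table above. For example, to reach an exporter through an HTTP proxy (a sketch; the proxy address and credentials are placeholders):\n\n```yaml\njobs:\n  - name: example\n    url: http://127.0.0.1:9090/metrics\n    proxy_url: http://proxy.local:3128\n    proxy_username: proxyuser\n    proxy_password: proxypass\n\n```\n\n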
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-IBM_Z_Hardware_Management_Console",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-influxdb",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"InfluxDB",link:"https://github.com/prometheus/influxdb_exporter",icon_filename:"influxdb.svg",categories:["data-collection.databases"]},keywords:["database","dbms","data storage"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# InfluxDB\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor InfluxDB time-series database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on 
the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
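\n\nBeyond the minimal skeleton shown below, the filtering and limit options from the table above can be combined in a single job. For example, to collect only a subset of time series and raise the global limit (a sketch; the pattern and the limit value are placeholders):\n\n```yaml\njobs:\n  - name: example\n    url: http://127.0.0.1:9090/metrics\n    max_time_series: 5000\n    selector:\n      allow:\n        - influxdb_*\n\n```\n\n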
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-InfluxDB",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-jmx",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"JMX",link:"https://github.com/prometheus/jmx_exporter",icon_filename:"java.svg",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# JMX\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Java Management Extensions (JMX) metrics for efficient Java application management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [JMX Exporter](https://github.com/prometheus/jmx_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [JMX Exporter](https://github.com/prometheus/jmx_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
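\n\nBeyond the minimal skeleton shown below, the customization options from the table above can also be set per job. For example, to prefix chart labels and treat selected untyped metrics as gauges (a sketch; the prefix and the metric name pattern are placeholders):\n\n```yaml\njobs:\n  - name: example\n    url: http://127.0.0.1:9090/metrics\n    label_prefix: jmx\n    fallback_type:\n      gauge:\n        - jvm_*\n\n```\n\n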
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-JMX",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-jarvis",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Jarvis Standing Desk",link:"https://github.com/hairyhenderson/jarvis_exporter/",icon_filename:"jarvis.jpg",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Jarvis Standing Desk\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Jarvis standing desk usage metrics for efficient workspace ergonomics and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by 
{meta:{id:"collector-go.d.plugin-prometheus-jarvis",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Jarvis Standing Desk",link:"https://github.com/hairyhenderson/jarvis_exporter/",icon_filename:"jarvis.jpg",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Jarvis Standing Desk\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Jarvis standing desk usage metrics for efficient workspace ergonomics and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  \nThese messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace the name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Jarvis_Standing_Desk",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},
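/* Editor's note: a sketch of the documented selector semantics -- (pattern1 OR pattern2) AND
   !(pattern3 OR pattern4). Assumption (not stated above): an empty allow list permits everything.
   Names are hypothetical; real patterns use the selector syntax linked in the docs, not RegExp.
function seriesSelected(name, allow = [], deny = []) {
  const anyMatch = (patterns) => patterns.some((p) => new RegExp(p).test(name));
  const allowed = allow.length === 0 || anyMatch(allow); // OR across allow patterns
  return allowed && !anyMatch(deny);                     // any deny match wins
}
seriesSelected("node_cpu_seconds_total", ["node_.*"], ["node_scrape_.*"]); // -> true
*/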
{meta:{id:"collector-go.d.plugin-prometheus-jenkins",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Jenkins",link:"https://www.jenkins.io/",icon_filename:"jenkins.svg",categories:["data-collection.cloud-and-devops"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Jenkins\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Jenkins continuous integration server metrics for efficient development and build management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  \nThese messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace the name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Jenkins",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},
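/* Editor's note: a sketch of the chart-grouping rule in the metrics tables above -- histogram
   buckets share one chart per label set excluding 'le', summary quantiles one excluding
   'quantile'. The key format is hypothetical; only the grouping idea is from the docs.
function chartKey(name, labels, type) {
  const drop = type === "histogram" ? "le" : type === "summary" ? "quantile" : null;
  const kept = Object.entries(labels)
    .filter(([k]) => k !== drop)                // drop the bucket/quantile label
    .sort(([a], [b]) => a.localeCompare(b))     // stable key order
    .map(([k, v]) => k + "=" + v);
  return name + "{" + kept.join(",") + "}";
}
chartKey("req_duration", { path: "/", le: "0.5" }, "histogram"); // -> "req_duration{path=/}"
*/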
{meta:{id:"collector-go.d.plugin-prometheus-kafka",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Kafka",link:"https://github.com/danielqsj/kafka_exporter/",icon_filename:"kafka.svg",categories:["data-collection.databases"]},keywords:["big data","stream processing","message broker"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Kafka\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Kafka message queue metrics for optimized data streaming and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  \nThese messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace the name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Kafka",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},
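/* Editor's note: a sketch of how the HTTP Auth options above translate into a request header.
   Assumption: bearer_token_file holds a single token string; reading the file is
   environment-specific and omitted here. The config shape and function name are hypothetical.
function authHeader(cfg) {
  if (cfg.bearerToken) return "Bearer " + cfg.bearerToken.trim();
  if (cfg.username) return "Basic " + btoa(cfg.username + ":" + (cfg.password || ""));
  return null; // no auth configured
}
authHeader({ username: "username", password: "password" }); // -> "Basic dXNlcm5hbWU6cGFzc3dvcmQ="
*/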
{meta:{id:"collector-go.d.plugin-prometheus-kafka_consumer_lag",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Kafka Consumer Lag",link:"https://github.com/omarsmak/kafka-consumer-lag-monitoring",icon_filename:"kafka.svg",categories:["data-collection.databases"]},keywords:["big data","stream processing","message broker"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Kafka Consumer Lag\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Kafka consumer lag metrics for efficient message queue management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  \nThese messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace the name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Kafka_Consumer_Lag",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},
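/* Editor's note: a sketch of the two Limits options documented above, as literally described --
   max_time_series drops the whole response, max_time_series_per_metric skips individual
   metrics. How the two caps interact beyond that is an assumption; names are hypothetical.
function checkLimits(seriesPerMetric, maxTotal = 2000, maxPerMetric = 200) {
  const total = Object.values(seriesPerMetric).reduce((sum, n) => sum + n, 0);
  if (total > maxTotal) return { process: false, skipped: [] };  // data not processed at all
  const skipped = Object.keys(seriesPerMetric)
    .filter((m) => seriesPerMetric[m] > maxPerMetric);           // over-cap metrics skipped
  return { process: true, skipped };
}
checkLimits({ up: 1, http_requests_total: 250 }); // -> { process: true, skipped: ["http_requests_total"] }
*/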
{meta:{id:"collector-go.d.plugin-prometheus-kafka_zookeeper",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Kafka ZooKeeper",link:"https://github.com/cloudflare/kafka_zookeeper_exporter",icon_filename:"kafka.svg",categories:["data-collection.databases"]},keywords:["big data","stream processing","message broker"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Kafka ZooKeeper\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Kafka ZooKeeper metrics for optimized distributed coordination and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Kafka_ZooKeeper",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-kannel",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Kannel",link:"https://github.com/apostvav/kannel_exporter",icon_filename:"kannel.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Kannel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Kannel SMS gateway and WAP gateway metrics for efficient mobile communication and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kannel Exporter](https://github.com/apostvav/kannel_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports 
that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kannel Exporter](https://github.com/apostvav/kannel_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Kannel",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-keepalived",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Keepalived",link:"https://github.com/gen2brain/keepalived_exporter",icon_filename:"Keepalived.png",categories:["data-collection.networking"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Keepalived\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Keepalived metrics for efficient high-availability and load balancing management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that 
are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Keepalived",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-linode",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Linode",link:"https://github.com/DazWilkin/linode-exporter",icon_filename:"linode.svg",categories:["data-collection.cloud-and-devops"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Linode\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Linode cloud hosting metrics for efficient virtual server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Linode Exporter](https://github.com/DazWilkin/linode-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are 
[allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Linode Exporter](https://github.com/DazWilkin/linode-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Linode",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-lustre",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Lustre metadata",link:"https://github.com/GSI-HPC/prometheus-cluster-exporter",icon_filename:"lustre.png",categories:["data-collection.storage"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Lustre metadata\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Lustre clustered file system for efficient management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports 
that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Lustre_metadata",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-lynis",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Lynis audit reports",link:"https://github.com/MauveSoftware/lynis_exporter",icon_filename:"lynis.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Lynis audit reports\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Lynis security auditing tool metrics for efficient system security and compliance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to 
connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Lynis_audit_reports",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-mqtt_blackbox",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"MQTT Blackbox",link:"https://github.com/inovex/mqtt_blackbox_exporter",icon_filename:"mqtt.svg",categories:["data-collection.synthetic-testing"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# MQTT Blackbox\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MQTT message transport performance using blackbox testing methods.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known 
ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-MQTT_Blackbox",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-meilisearch",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Meilisearch",link:"https://github.com/scottaglia/meilisearch_exporter",icon_filename:"meilisearch.svg",categories:["data-collection.databases"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Meilisearch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Meilisearch search engine metrics for efficient search performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to 
known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Meilisearch",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-mesos",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Mesos",link:"http://github.com/mesosphere/mesos_exporter",icon_filename:"mesos.svg",categories:["data-collection.containers-and-vms"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Mesos\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Apache Mesos cluster manager metrics for efficient resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Mesos exporter](http://github.com/mesosphere/mesos_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are 
[allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Mesos exporter](http://github.com/mesosphere/mesos_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Mesos",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-minecraft",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Minecraft",link:"https://github.com/sladkoff/minecraft-prometheus-exporter",icon_filename:"minecraft.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Minecraft\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Minecraft server metrics for efficient game server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known 
ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
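Since it is plain YAML, you can sanity-check edits before restarting Netdata. A minimal illustrative check, not from the official docs (assumes python3 with PyYAML and the stock config path; adjust if yours differs):\n\n```bash\n# fails with a parse error if the YAML is invalid (requires PyYAML)\npython3 -c "import yaml, sys; yaml.safe_load(open(sys.argv[1]))" /etc/netdata/go.d/prometheus.conf && echo OK\n```\n\n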
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Minecraft",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-modbus_rtu",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Modbus protocol",link:"https://github.com/dernasherbrezon/modbusrtu_exporter",icon_filename:"modbus.svg",categories:["data-collection.hardware-and-sensors"]},keywords:["database","dbms","data storage"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Modbus protocol\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Modbus RTU protocol metrics for efficient industrial automation and control performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances 
running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
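Before adding a job, it can help to confirm that the target endpoint actually serves Prometheus metrics. A hedged example (127.0.0.1:9090 is a placeholder; use the address your exporter listens on):\n\n```bash\n# -f makes curl exit non-zero on HTTP errors; head keeps the output short\ncurl -sf http://127.0.0.1:9090/metrics | head -n 20\n```\n\n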
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Modbus_protocol",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-mogilefs",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"MogileFS",link:"https://github.com/KKBOX/mogilefs-exporter",icon_filename:"filesystem.svg",categories:["data-collection.storage"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# MogileFS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor MogileFS distributed file system metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that 
are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
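Keep in mind that changes to this file take effect only after the Netdata service restarts. On systemd systems (adjust for other init systems):\n\n```bash\n# restart the agent so new job definitions are picked up\nsudo systemctl restart netdata\n```\n\n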
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-MogileFS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-nrpe",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"NRPE daemon",link:"https://github.com/canonical/nrpe_exporter",icon_filename:"nrpelinux.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# NRPE daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Nagios Remote Plugin Executor (NRPE) metrics for efficient system and network monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NRPE exporter](https://github.com/canonical/nrpe_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are 
[allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NRPE exporter](https://github.com/canonical/nrpe_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
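If the file already defines jobs, a quick grep lists their names, which helps avoid duplicate job names (a simple sketch assuming the stock path):\n\n```bash\n# print the name lines of all configured jobs\ngrep -E "^[[:space:]]*- name:" /etc/netdata/go.d/prometheus.conf\n```\n\n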
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-NRPE_daemon",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-nagios",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Nagios",link:"https://github.com/wbollock/nagios_exporter",icon_filename:"nagios.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Nagios\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Nagios network monitoring metrics for efficient\nIT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nagios exporter](https://github.com/wbollock/nagios_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that 
are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nagios exporter](https://github.com/wbollock/nagios_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
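One practical note: the bearer_token_file option expects a plain file on disk that the netdata user can read. Creating one might look like this (illustrative path and placeholder token):\n\n```bash\n# write the token to a file only the netdata user can read\nprintf "%s" "REPLACE_WITH_TOKEN" | sudo tee /etc/netdata/prom_token >/dev/null\nsudo chown netdata:netdata /etc/netdata/prom_token\nsudo chmod 600 /etc/netdata/prom_token\n```\n\nThe job can then reference it as bearer_token_file: /etc/netdata/prom_token.\n\n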
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata instance runs in a Docker container named \"netdata\" (replace the name if yours differs), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Nagios",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-nature_remo",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Nature Remo E lite devices",link:"https://github.com/kenfdev/remo-exporter",icon_filename:"nature-remo.png",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Nature Remo E lite devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Nature Remo E series smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running 
on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
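\n\nBefore the general layout shown next, here is a hedged illustration of the `selector` option documented above (the metric name pattern is hypothetical, not taken from this exporter):\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    selector:\n      allow:\n        - remo_*  # hypothetical pattern; collect only metrics whose names start with remo_\n```\n\n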
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata instance runs in a Docker container named \"netdata\" (replace the name if yours differs), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Nature_Remo_E_lite_devices",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-netapp_solidfire",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"NetApp Solidfire",link:"https://github.com/mjavier2k/solidfire-exporter",icon_filename:"netapp.svg",categories:["data-collection.storage"]},keywords:["network monitoring","network performance","traffic analysis"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# NetApp Solidfire\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NetApp Solidfire storage system metrics for efficient data storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### 
Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. 
|  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
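\n\nBefore the general layout shown next, here is a hedged sketch of adjusting the documented limits for a high-cardinality endpoint (the values are illustrative assumptions, not recommendations):\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    max_time_series: 5000             # endpoint-wide cap; above it, the data is not processed\n    max_time_series_per_metric: 500   # per-metric cap; metrics exceeding it are skipped\n```\n\n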
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata instance runs in a Docker container named \"netdata\" (replace the name if yours differs), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-NetApp_Solidfire",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-netapp_ontap",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Netapp ONTAP API",link:"https://github.com/sapcc/netapp-api-exporter",icon_filename:"netapp.svg",categories:["data-collection.storage"]},keywords:["network monitoring","network performance","traffic analysis"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Netapp ONTAP API\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on NetApp ONTAP storage system metrics for efficient data storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy 
default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
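\n\nBefore the general layout shown next, here is a hedged sketch of token-based authentication using the documented `bearer_token_file` option (the file path is a placeholder assumption):\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    bearer_token_file: /etc/netdata/secrets/token  # placeholder path; the file content is sent as Authorization: Bearer\n```\n\n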
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata instance runs in a Docker container named \"netdata\" (replace the name if yours differs), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Netapp_ONTAP_API",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-netatmo",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Netatmo sensors",link:"https://github.com/xperimental/netatmo-exporter",icon_filename:"netatmo.svg",categories:["data-collection.hardware-and-sensors"]},keywords:["network monitoring","network performance","traffic analysis"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Netatmo sensors\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Netatmo smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Netatmo exporter](https://github.com/xperimental/netatmo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy 
default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Netatmo exporter](https://github.com/xperimental/netatmo-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
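\n\nBefore the general layout shown next, here is a hedged sketch of scraping through an HTTP proxy with the documented proxy options (host, port, and credentials are placeholders):\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    proxy_url: http://proxy.example.com:3128  # placeholder proxy endpoint\n    proxy_username: proxyuser                 # placeholder credentials\n    proxy_password: proxypass\n```\n\n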
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Netatmo_sensors",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-nextdns",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"NextDNS",link:"https://github.com/raylas/nextdns-exporter",icon_filename:"nextdns.png",categories:["data-collection.networking"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# NextDNS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NextDNS DNS resolver and security platform metrics for efficient DNS management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [nextdns-exporter](https://github.com/raylas/nextdns-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are 
[allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nextdns-exporter](https://github.com/raylas/nextdns-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
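In addition to the examples further below, here is a hedged sketch of the `bearer_token_file` option from the table above; the token path is an illustrative assumption, not a documented default:\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    # hypothetical path; the file should contain only the bearer token\n    bearer_token_file: /etc/netdata/secrets/prom_token\n\n```\n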
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-NextDNS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-nextcloud",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Nextcloud servers",link:"https://github.com/xperimental/nextcloud-exporter",icon_filename:"nextcloud.png",categories:["data-collection.applications"]},keywords:["cloud services","cloud computing","scalability"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Nextcloud servers\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Nextcloud cloud storage metrics for efficient file hosting and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances 
running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
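In addition to the examples further below, here is a hedged sketch of the `headers` option from the table above ("one per line as key: value"); the header names and values are illustrative assumptions, not requirements of this exporter:\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    headers:\n      # illustrative values; use whatever your endpoint expects\n      X-Scope-OrgID: tenant-1\n      Accept: text/plain\n\n```\n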
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Nextcloud_servers",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-obs_studio",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"OBS Studio",link:"https://github.com/lukegb/obs_studio_exporter",icon_filename:"obs-studio.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# OBS Studio\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OBS Studio live streaming and recording software metrics for efficient video production and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying 
to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
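In addition to the examples further below, here is a hedged sketch of the proxy options from the table above (`proxy_url`, `proxy_username`, `proxy_password`); the proxy endpoint and credentials are illustrative assumptions:\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    # illustrative proxy endpoint and credentials\n    proxy_url: http://127.0.0.1:3128\n    proxy_username: proxyuser\n    proxy_password: proxypass\n\n```\n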
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-OBS_Studio",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-openrc",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"OpenRC",link:"https://git.sr.ht/~tomleb/openrc-exporter",icon_filename:"linux.png",categories:["data-collection.operating-systems"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# OpenRC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on OpenRC init system metrics for efficient system startup and service management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
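In addition to the examples further below, here is a hedged sketch combining the limit and customization options from the table above; the values shown simply tighten the documented defaults (2000 time series globally, 200 per metric) and are illustrative:\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    max_time_series: 1000\n    max_time_series_per_metric: 100\n    # per the table, labels are formatted as prefix_name, e.g. myapp_instance\n    label_prefix: myapp\n\n```\n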
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-OpenRC",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-openroadm",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"OpenROADM devices",link:"https://github.com/utdal/openroadm_exporter",icon_filename:"openroadm.png",categories:["data-collection.networking"]},keywords:["network monitoring","network performance","traffic analysis"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# OpenROADM devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenROADM optical transport network metrics using the NETCONF protocol for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### 
Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. 
|  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
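In addition to the examples further below, here is a hedged sketch of the client-certificate (mTLS) options from the table above (`tls_ca`, `tls_cert`, `tls_key`); the certificate paths are illustrative assumptions:\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    # illustrative paths for a private CA bundle and a client certificate/key pair\n    tls_ca: /etc/netdata/ssl/ca.pem\n    tls_cert: /etc/netdata/ssl/client.pem\n    tls_key: /etc/netdata/ssl/client.key\n\n```\n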
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-OpenROADM_devices",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-openweathermap",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"OpenWeatherMap",link:"https://github.com/billykwooten/openweather-exporter",icon_filename:"openweather.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# OpenWeatherMap\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenWeatherMap weather data and air pollution metrics for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenWeatherMap Exporter](https://github.com/billykwooten/openweather-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances 
running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenWeatherMap Exporter](https://github.com/billykwooten/openweather-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
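Before the generic structure shown next, one detail from the tables above deserves a concrete example: the **Filters** and **Limits** options can be combined in a single job to keep high-cardinality exporters in check. A hedged sketch (the metric patterns, port, and raised cap are invented for illustration; check the exporter's actual metric names):

```yaml
jobs:
  - name: openweathermap
    url: http://127.0.0.1:9091/metrics   # placeholder port
    max_time_series: 5000                # raise the global cap only if needed
    selector:
      allow:
        - openweather_*                  # hypothetical pattern
      deny:
        - openweather_*_info             # hypothetical pattern
```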
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-OpenWeatherMap",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-openvswitch",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Open vSwitch",link:"https://github.com/digitalocean/openvswitch_exporter",icon_filename:"ovs.png",categories:["data-collection.networking"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Open vSwitch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Open vSwitch software-defined networking metrics for efficient network virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local 
host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
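Alongside the generic structure shown next, the **Customization** options from the table above (`fallback_type` and `label_prefix`) can also be set per job, rather than in the standalone form shown earlier. A minimal sketch, assuming hypothetical untyped metric names ending in `_state`:

```yaml
jobs:
  - name: openvswitch
    url: http://127.0.0.1:9090/metrics   # placeholder port
    label_prefix: ovs                    # labels become ovs_<name>
    fallback_type:
      gauge:
        - '*_state'                      # hypothetical pattern for untyped gauges
```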
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Open_vSwitch",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-patroni",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Patroni",link:"https://github.com/gopaytech/patroni_exporter",icon_filename:"patroni.png",categories:["data-collection.databases"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Patroni\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Patroni PostgreSQL high-availability metrics for efficient database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Patroni Exporter](https://github.com/gopaytech/patroni_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports 
that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Patroni Exporter](https://github.com/gopaytech/patroni_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
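Besides the generic structure shown next, the **Proxy** options from the table above have no dedicated example further down, so here is a minimal sketch with placeholder proxy address and credentials:

```yaml
jobs:
  - name: patroni
    url: http://127.0.0.1:9090/metrics   # placeholder port
    proxy_url: http://proxy.local:3128   # placeholder proxy address
    proxy_username: proxyuser            # placeholder credentials
    proxy_password: proxypass
```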
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Patroni",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-pws",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Personal Weather Station",link:"https://github.com/JohnOrthoefer/pws-exporter",icon_filename:"wunderground.png",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Personal Weather Station\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack personal weather station metrics for efficient weather monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local 
host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
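In addition to the generic structure shown next, the **TLS** options from the table above can be combined for mutual TLS, which the stock examples further down do not cover (they only show `tls_skip_verify`). A minimal sketch with placeholder certificate paths:

```yaml
jobs:
  - name: pws
    url: https://127.0.0.1:9090/metrics  # placeholder port
    tls_ca: /etc/ssl/internal-ca.pem     # placeholder CA bundle path
    tls_cert: /etc/ssl/client.pem        # placeholder client certificate
    tls_key: /etc/ssl/client.key         # placeholder client key
```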
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Personal_Weather_Station",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-pgpool2",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Pgpool-II",link:"https://github.com/pgpool/pgpool2_exporter",icon_filename:"pgpool2.png",categories:["data-collection.databases"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Pgpool-II\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Pgpool-II PostgreSQL middleware metrics for efficient database connection management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to 
known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Pgpool-II",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-philips_hue",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Philips Hue",link:"https://github.com/aexel90/hue_exporter",icon_filename:"hue.svg",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Philips Hue\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Philips Hue smart lighting metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Philips Hue Exporter](https://github.com/aexel90/hue_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known 
ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Philips Hue Exporter](https://github.com/aexel90/hue_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Philips_Hue",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-pimoroni_enviro_plus",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Pimoroni Enviro+",link:"https://github.com/terradolor/prometheus-enviro-exporter",icon_filename:"pimorino.png",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Pimoroni Enviro+\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Pimoroni Enviro+ air quality and environmental metrics for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it 
detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Pimoroni_Enviro+",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-podman",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Podman",link:"https://github.com/containers/prometheus-podman-exporter",icon_filename:"podman.png",categories:["data-collection.containers-and-vms"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Podman\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Podman container runtime metrics for efficient container management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to 
connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Podman",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-powerpal",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Powerpal devices",link:"https://github.com/aashley/powerpal_exporter",icon_filename:"powerpal.png",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Powerpal devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Powerpal smart meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Powerpal Exporter](https://github.com/aashley/powerpal_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to 
known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Powerpal Exporter](https://github.com/aashley/powerpal_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Powerpal_devices",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-proftpd",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"ProFTPD",link:"https://github.com/transnano/proftpd_exporter",icon_filename:"proftpd.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# ProFTPD\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ProFTPD FTP server metrics for efficient file transfer and server performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are 
[allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
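As a hedged sketch only (the port, header name, and values are hypothetical, and expressing `headers` as a YAML key/value mapping is an assumption based on the "key: value" description in the table above), the request-related options translate into job keys like this:

```yaml
jobs:
  - name: proftpd
    url: http://127.0.0.1:9064/metrics   # hypothetical exporter port
    timeout: 5                           # shorter HTTP request timeout (seconds)
    headers:                             # assumed mapping form of "key: value" header lines
      X-Scrape-Source: netdata           # hypothetical custom header
    not_follow_redirects: yes            # do not follow HTTP redirects
```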
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-ProFTPD",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-generic",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Prometheus endpoint",link:"https://prometheus.io/",icon_filename:"prometheus.svg",categories:["data-collection.applications"]},keywords:["prometheus","openmetrics"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Prometheus endpoint\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nThis generic Prometheus collector gathers metrics from any [`Prometheus`](https://prometheus.io/) endpoints.\n\n\nIt collects metrics by periodically sending HTTP requests to the target instance.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. 
|  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
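To make the `fallback_type` rules above concrete, here is a minimal sketch assuming a hypothetical application that exposes the untyped metrics `myapp_requests_served` and `myapp_temperature` (names invented for illustration):

```yaml
jobs:
  - name: myapp
    url: http://127.0.0.1:8080/metrics   # hypothetical endpoint
    fallback_type:
      counter:
        - myapp_requests_*               # process matching untyped metrics as Counters
      gauge:
        - myapp_temperature              # process this untyped metric as a Gauge
```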
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Prometheus_endpoint",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-proxmox",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Proxmox VE",link:"https://github.com/prometheus-pve/prometheus-pve-exporter",icon_filename:"proxmox.png",categories:["data-collection.containers-and-vms"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Proxmox VE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Proxmox Virtual Environment metrics for efficient virtualization and container management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the 
local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
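For the TLS and proxy option groups in the table above, a hedged sketch (every path, host, and port below is hypothetical) looks like:

```yaml
jobs:
  - name: pve
    url: https://127.0.0.1:9221/metrics        # hypothetical exporter port
    tls_ca: /etc/ssl/internal-ca.pem           # CA bundle used to validate the server certificate
    tls_cert: /etc/ssl/netdata-client.pem      # client certificate for mTLS
    tls_key: /etc/ssl/netdata-client.key       # client private key for mTLS
    proxy_url: http://proxy.example.com:3128   # hypothetical HTTP proxy
```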
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Proxmox_VE",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-radius",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"RADIUS",link:"https://github.com/devon-mar/radius-exporter",icon_filename:"radius.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# RADIUS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on RADIUS (Remote Authentication Dial-In User Service) protocol metrics for efficient authentication and access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RADIUS exporter](https://github.com/devon-mar/radius-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to 
connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RADIUS exporter](https://github.com/devon-mar/radius-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
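To illustrate the selector logic described above, (pattern1 OR pattern2) AND !(pattern3 OR pattern4), here is a minimal sketch with invented metric-name patterns; the RADIUS exporter's real metric names may differ:

```yaml
jobs:
  - name: radius
    url: http://127.0.0.1:9812/metrics   # hypothetical exporter port
    selector:
      allow:                             # keep a series if it matches ANY allow pattern...
        - radius_*
        - go_goroutines
      deny:                              # ...and does NOT match ANY deny pattern
        - radius_debug_*
```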
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-RADIUS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ripe_atlas",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"RIPE Atlas",link:"https://github.com/czerwonk/atlas_exporter",icon_filename:"ripe.png",categories:["data-collection.networking"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# RIPE Atlas\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on RIPE Atlas Internet measurement platform metrics for efficient network monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known 
ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf Netdata runs in a Docker container named \"netdata\" (replace the name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-RIPE_Atlas",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-radio_thermostat",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Radio Thermostat",link:"https://github.com/andrewlow/radio-thermostat-exporter",icon_filename:"radiots.png",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Radio Thermostat\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Radio Thermostat smart thermostat metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances 
running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf Netdata runs in a Docker container named \"netdata\" (replace the name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Radio_Thermostat",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-raritan_pdu",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Raritan PDU",link:"https://github.com/psyinfra/prometheus-raritan-pdu-exporter",icon_filename:"raritan.svg",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Raritan PDU\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Raritan Power Distribution Unit (PDU) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on 
the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf Netdata runs in a Docker container named \"netdata\" (replace the name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Raritan_PDU",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-redis_queue",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Redis Queue",link:"https://github.com/mdawar/rq-exporter",icon_filename:"rq.png",categories:["data-collection.databases"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Redis Queue\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Python RQ (Redis Queue) job queue metrics for efficient task management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Python RQ Exporter](https://github.com/mdawar/rq-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated 
to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Python RQ Exporter](https://github.com/mdawar/rq-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf Netdata runs in a Docker container named \"netdata\" (replace the name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Redis_Queue",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sabnzbd",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"SABnzbd",link:"https://github.com/msroest/sabnzbd_exporter",icon_filename:"sabnzbd.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# SABnzbd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SABnzbd Usenet client metrics for efficient file downloads and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are 
[allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf Netdata runs in a Docker container named \"netdata\" (replace the name if yours differs), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other untyped metrics are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-SABnzbd",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sma_inverter",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"SMA Inverters",link:"https://github.com/dr0ps/sma_inverter_exporter",icon_filename:"sma.png",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# SMA Inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SMA solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports 
that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-SMA_Inverters",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sonic",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"SONiC NOS",link:"https://github.com/kamelnetworks/sonic_exporter",icon_filename:"sonic.png",categories:["data-collection.networking"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# SONiC NOS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Software for Open Networking in the Cloud (SONiC) metrics for efficient network switch management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to 
connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-SONiC_NOS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-salicru_eqx",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Salicru EQX inverter",link:"https://github.com/alejandroscf/prometheus_salicru_exporter",icon_filename:"salicru.png",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Salicru EQX inverter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Salicru EQX solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects 
instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Salicru_EQX_inverter",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-scylladb",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"ScyllaDB",link:"https://www.scylladb.com/",icon_filename:"scylladb.svg",categories:["data-collection.databases"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# ScyllaDB\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack ScyllaDB NoSQL database metrics for efficient database management and performance with Netdata's Prometheus integration.\n\n\nMetrics are gathered by periodically sending HTTP requests to the ScyllaDB built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are 
[allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. 
|  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-ScyllaDB",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sense_energy",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Sense Energy",link:"https://github.com/ejsuncy/sense_energy_prometheus_exporter",icon_filename:"sense.png",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Sense Energy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Sense Energy smart meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local 
host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option filters the scraped time series: only series matching the selector are collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n
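Both `selector` and `fallback_type` are set per job. As an illustrative sketch (the job name, URL, and patterns below are placeholders), they sit under a job entry like this:\n\n```yaml\njobs:\n  - name: example\n    url: http://127.0.0.1:9090/metrics\n    selector:\n      allow:\n        - metric_name_pattern*\n    fallback_type:\n      gauge:\n        - untyped_metric_pattern*\n```\n\n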
#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Sense_Energy",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-shelly",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Shelly humidity sensor",link:"https://github.com/aexel90/shelly_exporter",icon_filename:"shelly.jpg",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Shelly humidity sensor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Shelly smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Shelly Exporter](https://github.com/aexel90/shelly_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to 
connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Shelly Exporter](https://github.com/aexel90/shelly_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Shelly_humidity_sensor",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-s7_plc",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Siemens S7 PLC",link:"https://github.com/MarcusCalidus/s7-plc-exporter",icon_filename:"siemens.svg",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Siemens S7 PLC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Siemens S7 Programmable Logic Controller (PLC) metrics for efficient industrial automation and control.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on 
the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option filters the scraped time series: only series matching the selector are collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n
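The proxy options are likewise set per job. A minimal illustrative sketch (the proxy address and credentials below are placeholders):\n\n```yaml\njobs:\n  - name: example\n    url: http://127.0.0.1:9090/metrics\n    proxy_url: http://127.0.0.1:3128\n    proxy_username: proxyuser\n    proxy_password: proxypass\n```\n\n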
#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Siemens_S7_PLC",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-site24x7",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Site 24x7",link:"https://github.com/svenstaro/site24x7_exporter",icon_filename:"site24x7.svg",categories:["data-collection.synthetic-testing"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Site 24x7\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Site24x7 website and infrastructure monitoring metrics for efficient performance tracking and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying 
to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Site_24x7",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-slurm",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Slurm",link:"https://github.com/SckyzO/slurm_exporter",icon_filename:"slurm.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Slurm\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Slurm workload manager metrics for efficient high-performance computing (HPC) and cluster management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [slurm exporter](https://github.com/SckyzO/slurm_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated 
to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [slurm exporter](https://github.com/SckyzO/slurm_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Slurm",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sml",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Smart meters SML",link:"https://github.com/mweinelt/sml-exporter",icon_filename:"sml.png",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Smart meters SML\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Smart Message Language (SML) metrics for efficient smart metering and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SML Exporter](https://github.com/mweinelt/sml-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are 
[allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SML Exporter](https://github.com/mweinelt/sml-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Smart_meters_SML",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-softether",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"SoftEther VPN Server",link:"https://github.com/dalance/softether_exporter",icon_filename:"softether.svg",categories:["data-collection.networking"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# SoftEther VPN Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SoftEther VPN Server metrics for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SoftEther Exporter](https://github.com/dalance/softether_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local 
host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SoftEther Exporter](https://github.com/dalance/softether_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-SoftEther_VPN_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-lsx",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Solar logging stick",link:"https://gitlab.com/bhavin192/lsx-exporter",icon_filename:"solar.png",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Solar logging stick\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor solar energy metrics using a solar logging stick for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the 
local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Solar_logging_stick",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-solis",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Solis Ginlong 5G inverters",link:"https://github.com/candlerb/solis_exporter",icon_filename:"solis.jpg",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Solis Ginlong 5G inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Solis solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Solis Exporter](https://github.com/candlerb/solis_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying 
to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solis Exporter](https://github.com/candlerb/solis_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace with your container's name if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Solis_Ginlong_5G_inverters",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-spacelift",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Spacelift",link:"https://github.com/spacelift-io/prometheus-exporter",icon_filename:"spacelift.png",categories:["data-collection.cloud-and-devops"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Spacelift\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Spacelift infrastructure-as-code (IaC) platform metrics for efficient infrastructure automation and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running 
on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
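As a sketch of the selector option described above, a job that keeps only Spacelift series and drops common Go runtime internals might look like this (the URL and patterns are illustrative, not defaults):\n\n```yaml\njobs:\n  - name: spacelift\n    url: http://127.0.0.1:9090/metrics\n    selector:\n      allow:\n        - spacelift_*\n      deny:\n        - go_*\n        - process_*\n```\n\n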
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Spacelift",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sphinx",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Sphinx",link:"https://github.com/foxdalas/sphinx_exporter",icon_filename:"sphinx.png",categories:["data-collection.databases"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Sphinx\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Sphinx search engine metrics for efficient search and indexing performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
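If the exporter is reachable only through an HTTP proxy, the proxy options from the table above combine like this (the address, port, and credentials are placeholders):\n\n```yaml\njobs:\n  - name: sphinx\n    url: http://192.0.2.10:9090/metrics\n    proxy_url: http://proxy.example.com:3128\n    proxy_username: proxyuser\n    proxy_password: proxypass\n```\n\n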
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Sphinx",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-starlink",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Starlink (SpaceX)",link:"https://github.com/danopstech/starlink_exporter",icon_filename:"starlink.svg",categories:["data-collection.networking"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Starlink (SpaceX)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SpaceX Starlink satellite internet metrics for efficient internet service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by 
trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
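As a sketch of the fallback_type option described above, untyped metrics can be coerced to a type by name pattern (the URL and patterns are illustrative, not defaults):\n\n```yaml\njobs:\n  - name: starlink\n    url: http://127.0.0.1:9090/metrics\n    fallback_type:\n      counter:\n        - "*_requests"\n      gauge:\n        - starlink_dish_*\n```\n\n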
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Starlink_(SpaceX)",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-statuspage",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"StatusPage",link:"https://github.com/vladvasiliu/statuspage-exporter",icon_filename:"statuspage.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# StatusPage\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor StatusPage.io incident and status metrics for efficient incident management and communication.\n\n\nMetrics are gathered by periodically sending HTTP requests to [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by 
trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
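For an endpoint served over TLS with a private CA, or one that requires a client certificate, the TLS options from the table above combine as follows (all file paths are placeholders):\n\n```yaml\njobs:\n  - name: statuspage\n    url: https://192.0.2.20:9090/metrics\n    tls_ca: /etc/netdata/ssl/ca.pem\n    tls_cert: /etc/netdata/ssl/client.pem\n    tls_key: /etc/netdata/ssl/client.key\n```\n\n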
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-StatusPage",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-steam_a2s",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Steam",link:"https://github.com/armsnyder/a2s-exporter",icon_filename:"a2s.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Steam\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nGain insights into Steam A2S-supported game servers for performance and availability through real-time metric monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [A2S Exporter](https://github.com/armsnyder/a2s-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports 
that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [A2S Exporter](https://github.com/armsnyder/a2s-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Steam",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-storidge",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Storidge",link:"https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md",icon_filename:"storidge.png",categories:["data-collection.storage"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Storidge\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Storidge storage metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the 
local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Storidge",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sunspec",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Sunspec Solar Energy",link:"https://github.com/inosion/prometheus-sunspec-exporter",icon_filename:"sunspec.png",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Sunspec Solar Energy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SunSpec Alliance solar energy metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances 
running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Sunspec_Solar_Energy",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-suricata",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Suricata",link:"https://github.com/corelight/suricata_exporter",icon_filename:"suricata.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Suricata\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Suricata network intrusion detection and prevention system (IDS/IPS) metrics for efficient network security and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Suricata Exporter](https://github.com/corelight/suricata_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on 
the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Suricata Exporter](https://github.com/corelight/suricata_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Suricata",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-synology_activebackup",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Synology ActiveBackup",link:"https://github.com/codemonauts/activebackup-prometheus-exporter",icon_filename:"synology.png",categories:["data-collection.storage"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Synology ActiveBackup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Synology Active Backup metrics for efficient backup and data protection management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects 
instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Synology_ActiveBackup",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-sysload",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Sysload",link:"https://github.com/egmc/sysload_exporter",icon_filename:"sysload.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Sysload\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor system load metrics for efficient system performance and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sysload Exporter](https://github.com/egmc/sysload_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated 
to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sysload Exporter](https://github.com/egmc/sysload_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
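\n\nA hedged sketch of the proxy options from the table above; the proxy address and credentials are placeholders, not values documented for this exporter:\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics  # assumed port\n    proxy_url: http://proxy.example.com:3128  # hypothetical proxy\n    proxy_username: proxyuser  # placeholder\n    proxy_password: proxypass  # placeholder\n```\n\n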
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Sysload",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-tacas",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"TACACS",link:"https://github.com/devon-mar/tacacs-exporter",icon_filename:"tacacs.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# TACACS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Terminal Access Controller Access-Control System (TACACS) protocol metrics for efficient network authentication and authorization management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by 
trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
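\n\nA hedged sketch of the `headers` option from the table above, passing one extra header as a key: value pair; the header name and value are hypothetical:\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics  # assumed port\n    headers:\n      X-Api-Key: example-key  # hypothetical header\n```\n\n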
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-TACACS",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-tado",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Tado smart heating solution",link:"https://github.com/eko/tado-exporter",icon_filename:"tado.png",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Tado smart heating solution\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tado smart thermostat metrics for efficient home heating and cooling management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tado\\xB0 Exporter](https://github.com/eko/tado-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known 
ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tado Exporter](https://github.com/eko/tado-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
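\n\nA hedged sketch applying the [selector](#option-filters-selector) option inside a job; the pattern is illustrative, not taken from this exporter:\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics  # assumed port\n    selector:\n      allow:\n        - tado_*  # hypothetical pattern\n```\n\n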
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Tado_smart_heating_solution",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-tankerkoenig",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Tankerkoenig API",link:"https://github.com/lukasmalkmus/tankerkoenig_exporter",icon_filename:"tanker.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Tankerkoenig API\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Tankerkoenig API fuel price metrics for efficient fuel price monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tankerkoenig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the 
local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tankerkoenig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
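\n\nA hedged sketch applying the [fallback_type](#option-customization-fallback-type) option inside a job; the pattern is illustrative, not taken from this exporter:\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics  # assumed port\n    fallback_type:\n      gauge:\n        - tankerkoenig_*  # hypothetical pattern\n```\n\n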
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Tankerkoenig_API",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-tesla_wall_connector",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Tesla Wall Connector",link:"https://github.com/benclapp/tesla_wall_connector_exporter",icon_filename:"tesla.png",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Tesla Wall Connector\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tesla Wall Connector charging station metrics for efficient electric vehicle charging management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy 
default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. 
|  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
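Before the general layout, here is a sketch of a single job that combines the `url`, `timeout`, and `selector` options from the table above (the endpoint, port, and patterns are illustrative assumptions, not exporter defaults):\n\n```yaml\njobs:\n  - name: tesla_wall_connector\n    # hypothetical endpoint; point this at your tesla_wall_connector_exporter instance\n    url: http://127.0.0.1:9090/metrics\n    timeout: 5\n    selector:\n      allow:\n        # assumed metric name pattern; keep only wall connector series\n        - tesla_wall_connector_*\n      deny:\n        - go_*\n```\n\n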
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Tesla_Wall_Connector",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-tesla_vehicle",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Tesla vehicle",link:"https://github.com/wywywywy/tesla-prometheus-exporter",icon_filename:"tesla.png",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Tesla vehicle\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Tesla vehicle metrics for efficient electric vehicle management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying 
to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
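Before the general layout, here is a sketch showing how the `fallback_type` option from the table above attaches to a job so untyped metrics are still collected (the endpoint and metric name patterns are illustrative assumptions):\n\n```yaml\njobs:\n  - name: tesla_vehicle\n    # hypothetical endpoint; point this at your tesla-prometheus-exporter instance\n    url: http://127.0.0.1:9090/metrics\n    fallback_type:\n      gauge:\n        # assumed pattern: treat untyped battery/range series as gauges\n        - tesla_*_level\n      counter:\n        # assumed pattern for cumulative series\n        - tesla_*_count\n```\n\n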
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Tesla_vehicle",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-twitch",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Twitch",link:"https://github.com/damoun/twitch_exporter",icon_filename:"twitch.svg",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Twitch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Twitch streaming platform metrics for efficient live streaming management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Twitch exporter](https://github.com/damoun/twitch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated 
to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Twitch exporter](https://github.com/damoun/twitch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
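Before the general layout, here is a sketch of a job that uses the HTTP auth and request options from the table above to send a bearer token and an extra header (the endpoint, token path, and header are illustrative assumptions):\n\n```yaml\njobs:\n  - name: twitch\n    # hypothetical endpoint; point this at your twitch_exporter instance\n    url: http://127.0.0.1:9090/metrics\n    # hypothetical token path; the file must contain only the token\n    bearer_token_file: /etc/netdata/secrets/twitch.token\n    headers:\n      # one header per line as key: value (this header is an example)\n      X-Scope: netdata\n```\n\n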
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Twitch",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-ubiquity_ufiber",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Ubiquiti UFiber OLT",link:"https://github.com/swoga/ufiber-exporter",icon_filename:"ubiquiti.png",categories:["data-collection.networking"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Ubiquiti UFiber OLT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Ubiquiti UFiber GPON (Gigabit Passive Optical Network) device metrics for efficient fiber-optic network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ufiber-exporter](https://github.com/swoga/ufiber-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the 
local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ufiber-exporter](https://github.com/swoga/ufiber-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
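Before the general layout, here is a sketch of a job that reaches the exporter through an HTTP proxy and validates the server certificate against a custom CA, using the proxy and TLS options from the table above (all addresses, credentials, and paths are illustrative assumptions):\n\n```yaml\njobs:\n  - name: ufiber_olt\n    # hypothetical endpoint; point this at your ufiber-exporter instance\n    url: https://127.0.0.1:9090/metrics\n    # hypothetical CA bundle used to validate the exporter certificate\n    tls_ca: /etc/netdata/ssl/internal-ca.crt\n    proxy_url: http://127.0.0.1:3128\n    proxy_username: netdata\n    proxy_password: example\n```\n\n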
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Ubiquiti_UFiber_OLT",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-uptimerobot",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Uptimerobot",link:"https://github.com/wosc/prometheus-uptimerobot",icon_filename:"uptimerobot.svg",categories:["data-collection.synthetic-testing"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Uptimerobot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor UptimeRobot website uptime monitoring metrics for efficient website availability tracking and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the 
local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
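Before the general layout, here is a sketch of a job that adjusts the collection interval and the time series limits from the table above (the endpoint and the limit values are illustrative assumptions, not recommended settings):\n\n```yaml\njobs:\n  - name: uptimerobot\n    # hypothetical endpoint; point this at your prometheus-uptimerobot exporter\n    url: http://127.0.0.1:9090/metrics\n    # collect every 30 seconds instead of the default 10\n    update_every: 30\n    # skip processing if the endpoint returns more than 5000 series\n    max_time_series: 5000\n    max_time_series_per_metric: 500\n```\n\n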
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Uptimerobot",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-vscode",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"VSCode",link:"https://github.com/guicaulada/vscode-exporter",icon_filename:"vscode.svg",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# VSCode\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Visual Studio Code editor metrics for efficient development environment management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [VSCode Exporter](https://github.com/guicaulada/vscode-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that 
are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [VSCode Exporter](https://github.com/guicaulada/vscode-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-VSCode",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-vault_pki",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Vault PKI",link:"https://github.com/aarnaud/vault-pki-exporter",icon_filename:"vault.svg",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Vault PKI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HashiCorp Vault Public Key Infrastructure (PKI) metrics for efficient certificate management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to 
known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Vault_PKI",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-vertica",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Vertica",link:"https://github.com/vertica/vertica-prometheus-exporter",icon_filename:"vertica.svg",categories:["data-collection.databases"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Vertica\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Vertica analytics database platform metrics for efficient database performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to 
connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Vertica",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-warp10",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Warp10",link:"https://github.com/centreon/warp10-sensision-exporter",icon_filename:"warp10.svg",categories:["data-collection.databases"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Warp10\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Warp 10 time-series database metrics for efficient time-series data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known 
ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Warp10",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-xiaomi_mi_flora",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Xiaomi Mi Flora",link:"https://github.com/xperimental/flowercare-exporter",icon_filename:"xiaomi.svg",categories:["data-collection.hardware-and-sensors"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Xiaomi Mi Flora\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on MiFlora plant monitor metrics for efficient plant care and growth management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host 
by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Xiaomi_Mi_Flora",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-yourls",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"YOURLS URL Shortener",link:"https://github.com/just1not2/prometheus-exporter-yourls",icon_filename:"yourls.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# YOURLS URL Shortener\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor YOURLS (Your Own URL Shortener) metrics for efficient URL shortening service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running 
on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-YOURLS_URL_Shortener",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-zerto",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"Zerto",link:"https://github.com/claranet/zerto-exporter",icon_filename:"zerto.png",categories:["data-collection.cloud-and-devops"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# Zerto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Zerto disaster recovery and data protection metrics for efficient backup and recovery management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zerto Exporter](https://github.com/claranet/zerto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports 
that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zerto Exporter](https://github.com/claranet/zerto-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-Zerto",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-etcd",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"etcd",link:"https://etcd.io/",icon_filename:"etcd.svg",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# etcd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack etcd database metrics for optimized distributed key-value store management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to etcd built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. 
|  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-etcd",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-gpsd",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"gpsd",link:"https://github.com/natesales/gpsd-exporter",icon_filename:"gpsd.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# gpsd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor GPSD (GPS daemon) metrics for efficient GPS data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [gpsd exporter](https://github.com/natesales/gpsd-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [gpsd exporter](https://github.com/natesales/gpsd-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-gpsd",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-journald",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"journald",link:"https://github.com/dead-claudia/journald-exporter",icon_filename:"linux.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# journald\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on systemd-journald metrics for efficient log management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [journald-exporter](https://github.com/dead-claudia/journald-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated 
to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [journald-exporter](https://github.com/dead-claudia/journald-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
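The `fallback_type` option above is easiest to read with concrete patterns. A hedged sketch follows, assuming the endpoint exposes some untyped series you want processed rather than ignored; the patterns and port below are placeholders, not taken from journald-exporter's metadata.

```yaml
jobs:
  - name: journald
    url: http://127.0.0.1:9090/metrics   # placeholder port; adjust for your setup
    fallback_type:
      counter:
        - '*_events'   # placeholder: untyped *_events series are treated as counters
      gauge:
        - '*_usage'    # placeholder: untyped *_usage series are treated as gauges
```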
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-journald",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-loki",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"loki",link:"https://github.com/grafana/loki",icon_filename:"loki.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# loki\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Loki metrics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [loki](https://github.com/grafana/loki).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default 
configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Loki\n\nInstall [loki](https://github.com/grafana/loki) according to its documentation.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. 
|  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
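The options table above says `headers` takes additional HTTP headers "one per line as key: value"; in the YAML file that is an ordinary mapping. A minimal sketch, assuming Loki's default HTTP port (3100) and, purely for illustration, its multi-tenancy header (drop or adjust both for your deployment):

```yaml
jobs:
  - name: loki
    url: http://127.0.0.1:3100/metrics
    headers:
      X-Scope-OrgID: tenant1   # assumption: a multi-tenant Loki that expects this header
```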
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-loki",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-mosquitto",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"mosquitto",link:"https://github.com/sapcc/mosquitto-exporter",icon_filename:"mosquitto.svg",categories:["data-collection.databases"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# mosquitto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Mosquitto MQTT broker metrics for efficient IoT message transport and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are 
[allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
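When the exporter is only reachable through an HTTP proxy, the `proxy_*` options from the table above combine as shown below. This is an illustrative sketch: the proxy address, credentials, and port are all placeholders.

```yaml
jobs:
  - name: mosquitto
    url: http://127.0.0.1:9090/metrics        # placeholder port; adjust to the exporter
    proxy_url: http://proxy.example.com:3128  # placeholder proxy address
    proxy_username: user                      # placeholder credentials
    proxy_password: pass
```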
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-mosquitto",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-mtail",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"mtail",link:"https://github.com/google/mtail",icon_filename:"mtail.png",categories:["data-collection.applications"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# mtail\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor log data metrics using mtail log data extractor and parser.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mtail](https://github.com/google/mtail).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to 
exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mtail](https://github.com/google/mtail) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
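For endpoints that expect token authentication, `bearer_token_file` (HTTP Auth group above) points at a file whose contents are sent as an `Authorization: Bearer` header. A minimal sketch, assuming mtail's default port (3903) and a hypothetical token path:

```yaml
jobs:
  - name: mtail
    url: http://127.0.0.1:3903/metrics
    bearer_token_file: /etc/netdata/mtail.token  # hypothetical path; the file holds only the token
```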
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-mtail",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-pgbackrest",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"pgBackRest",link:"https://github.com/woblerr/pgbackrest_exporter",icon_filename:"pgbackrest.png",categories:["data-collection.databases"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# pgBackRest\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor pgBackRest PostgreSQL backup metrics for efficient database backup and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that 
are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). 
|  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-pgBackRest",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-prometheus-strongswan",module_name:"prometheus",plugin_name:"go.d.plugin",monitored_instance:{name:"strongSwan",link:"https://github.com/jlti-dev/ipsec_exporter",icon_filename:"strongswan.svg",categories:["data-collection.networking"]},keywords:[],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},community:!0},overview:"# strongSwan\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack strongSwan VPN and IPSec metrics using the vici interface for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the 
local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **prometheus** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **prometheus**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/prometheus.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 10 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 10 | no |\n| **Filters** | [selector](#option-filters-selector) | Time series selector (filter). |  | no |\n| **Limits** | max_time_series | Global time series limit. If an endpoint returns more time series than this, the data is not processed. | 2000 | no |\n|  | max_time_series_per_metric | Per-metric time series limit. Metrics with more time series than this are skipped. | 200 | no |\n| **Customization** | [fallback_type](#option-customization-fallback-type) | Fallback type rules for untyped metrics. |  | no |\n|  | label_prefix | Optional prefix added to all labels of all charts. Labels will be formatted as `prefix_name`. |  | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n<a id="option-filters-selector"></a>\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/netdata/blob/master/src/go/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n<a id="option-customization-fallback-type"></a>\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **prometheus** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the prometheus data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _prometheus_ (or scroll the list) to locate the **prometheus** collector.\n5. Click the **+** next to the **prometheus** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n\n##### Examples\n\n###### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n###### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details open=true summary="Config" %}\n```yaml\n# use "file://" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n###### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep prometheus\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep prometheus /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep prometheus\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric                    | Chart                                     | Dimension(s)         | Algorithm   |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge                     | for each label set                        | one, the metric name | absolute    |\n| Counter                   | for each label set                        | one, the metric name | incremental |\n| Summary (quantiles)       | for each label set (excluding 'quantile') | for each quantile    | absolute    |\n| Summary (sum and count)   | for each label set                        | the metric name      | incremental |\n| Histogram (buckets)       | for each label set (excluding 'le')       | for each bucket      | incremental |\n| Histogram (sum and count) | for each label set                        | the metric name      | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n",integration_type:"collector",id:"go.d.plugin-prometheus-strongSwan",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/prometheus/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-proxysql",plugin_name:"go.d.plugin",module_name:"proxysql",monitored_instance:{name:"ProxySQL",link:"https://www.proxysql.com/",icon_filename:"proxysql.png",categories:["data-collection.databases"]},keywords:["proxysql","databases","sql"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# ProxySQL\n\nPlugin: go.d.plugin\nModule: proxysql\n\n## Overview\n\nThis collector monitors ProxySQL servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the 
system.\n",setup:'## Setup\n\n\nYou can configure the **proxysql** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **proxysql**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/proxysql.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection frequency (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | dsn | ProxySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | stats:stats@tcp(127.0.0.1:6032)/ | yes |\n|  | timeout | Query timeout (seconds). | 1 | no |\n| **Functions** | functions.top_queries.disabled | Disable the [top-queries](#top-queries) function. | no | no |\n|  | functions.top_queries.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.top_queries.limit | Maximum number of queries to return. | 500 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **proxysql** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the proxysql data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _proxysql_ (or scroll the list) to locate the **proxysql** collector.\n5. Click the **+** next to the **proxysql** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/proxysql.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/proxysql.conf\n```\n\n##### Examples\n\n###### TCP socket\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n```\n{% /details %}\n###### my.cnf\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    my.cnf: \'/etc/my.cnf\'\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n  - name: remote\n    dsn: stats:stats@tcp(203.0.113.0:6032)/\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `proxysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m proxysql\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m proxysql -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `proxysql` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep proxysql\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep proxysql /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep proxysql\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ proxysql_hostgroup_no_online_backends ](https://github.com/netdata/netdata/blob/master/src/health/health.d/proxysql.conf) | proxysql.hostgroup_backends_status | ProxySQL hostgroup ${label:hostgroup} has no ONLINE backends |\n| [ proxysql_backend_shunned ](https://github.com/netdata/netdata/blob/master/src/health/health.d/proxysql.conf) | proxysql.backend_status | ProxySQL backend SHUNNED (${label:host}:${label:port} hostgroup ${label:hostgroup}) |\n| [ proxysql_backend_offline_hard ](https://github.com/netdata/netdata/blob/master/src/health/health.d/proxysql.conf) | proxysql.backend_status | ProxySQL backend OFFLINE_HARD (${label:host}:${label:port} hostgroup ${label:hostgroup}) |\n",functions:"## Functions\n\nThis collector exposes real-time functions for interactive troubleshooting in the Live tab.\n\n\n### Top Queries\n\nRetrieves aggregated query statistics from ProxySQL's [stats_mysql_query_digest](https://proxysql.com/documentation/stats-statistics/#stats_mysql_query_digest) table.\n\nThis function queries the `stats_mysql_query_digest` table which stores runtime statistics for all queries proxied through ProxySQL, aggregated by query digest (normalized query pattern). It provides timing metrics, execution counts, and error statistics for each unique query pattern.\n\nUse cases:\n- Identify slow queries consuming excessive total execution time\n- Find high-frequency queries that may benefit from caching\n- Monitor query error rates across backends\n\nQuery text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Proxysql:top-queries` |\n| Require Cloud | yes |\n| Performance | Queries ProxySQL admin interface for digest statistics:<br/>\u2022 Reads from in-memory `stats_mysql_query_digest` table<br/>\u2022 Default limit of 500 rows balances completeness with performance<br/>\u2022 Data is aggregated in-memory by ProxySQL from active connections |\n| Security | Query text may contain unmasked literal values including potentially sensitive data:<br/>\u2022 Personal information in WHERE clauses or INSERT values<br/>\u2022 Business data embedded in queries<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to ProxySQL admin interface<br/>\u2022 Returns HTTP 503 if the connection cannot be established<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\nNo additional configuration is required.\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. Options include total execution time, number of calls, rows affected, rows sent, errors, and warnings. Defaults to total time to focus on most resource-intensive queries. | yes | totalTime |  |\n\n#### Returns\n\nAggregated query digest statistics from ProxySQL, providing comprehensive performance analysis across all monitored MySQL backends. 
Each row represents a unique query pattern (normalized digest) with cumulative metrics across all its executions.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Digest | string |  | hidden | Unique hash identifier for normalized query pattern. Queries with identical structure but different literal values share the same digest. |\n| Query | string |  |  | The SQL query text with literal values truncated at 4096 characters. Use this to identify the actual SQL being executed and spot parameterized queries or injection risks. |\n| Schema | string |  |  | Database name where the query was executed. Essential for multi-database analysis to identify which database or backend is experiencing query load. |\n| User | string |  | hidden | MySQL username used to execute the query. Useful for identifying application users or connection pool attribution. |\n| Hostgroup | integer |  | hidden | Backend hostgroup identifier from ProxySQL configuration. Allows grouping queries by backend server for multi-backend analysis. |\n| Calls | integer |  |  | Total number of times this query pattern has been executed. High values indicate frequently run queries that may impact server performance significantly. |\n| Total Time | duration | milliseconds |  | Cumulative execution time across all query executions. This is a key metric for identifying the most resource-intensive queries in terms of total server time consumption. |\n| Avg Time | duration | milliseconds |  | Average execution time per query run. Compare with Total Time to determine if individual executions or high frequency drives resource usage. |\n| Min Time | duration | milliseconds | hidden | Minimum execution time observed. Helps identify variability in query performance and spot potential optimization opportunities for outliers. |\n| Max Time | duration | milliseconds | hidden | Maximum execution time observed. Large gaps between Min Time and Max Time may indicate performance instability due to parameter sniffing, data skew, or lock contention. |\n| Rows Affected | integer |  |  | Total number of rows modified by INSERT, UPDATE, DELETE, or REPLACE statements. Useful for tracking write workloads and data modification patterns. |\n| Rows Sent | integer |  |  | Total number of rows returned to the client by SELECT statements. High values may indicate queries returning large result sets that consume significant network bandwidth and client resources. |\n| Errors | integer |  |  | Total number of times this query pattern resulted in an error. Non-zero values require investigation into the underlying SQL syntax, permission issues, or constraint violations. |\n| Warnings | integer |  |  | Total number of times this query pattern generated a warning. Warnings may indicate data type conversions, NULL handling issues, or other non-critical SQL problems that should be reviewed. |\n| First Seen | string |  | hidden | Timestamp when this query pattern was first observed. Helps identify new queries that may have been introduced by application changes or code deployments. |\n| Last Seen | string |  | hidden | Timestamp when this query pattern was last executed. Can help identify stale queries that are no longer in use or to track recent query activity. |\n\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ProxySQL instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.client_connections_count | connected, non_idle, hostgroup_locked | connections |\n| proxysql.client_connections_rate | created, aborted | connections/s |\n| proxysql.server_connections_count | connected | connections |\n| proxysql.server_connections_rate | created, aborted, delayed | connections/s |\n| proxysql.backends_traffic | recv, sent | B/s |\n| proxysql.clients_traffic | recv, sent | B/s |\n| proxysql.active_transactions_count | client | connections |\n| proxysql.questions_rate | questions | questions/s |\n| proxysql.slow_queries_rate | slow | queries/s |\n| proxysql.queries_rate | autocommit, autocommit_filtered, commit_filtered, rollback, rollback_filtered, backend_change_user, backend_init_db, backend_set_names, frontend_init_db, frontend_set_names, frontend_use_db | queries/s |\n| proxysql.backend_statements_count | total, unique | statements |\n| proxysql.backend_statements_rate | prepare, execute, close | statements/s |\n| proxysql.client_statements_count | total, unique | statements |\n| proxysql.client_statements_rate | prepare, execute, close | statements/s |\n| proxysql.cached_statements_count | cached | statements |\n| proxysql.query_cache_entries_count | entries | entries |\n| proxysql.query_cache_memory_used | used | B |\n| proxysql.query_cache_io | in, out | B/s |\n| proxysql.query_cache_requests_rate | read, write, read_success | requests/s |\n| proxysql.mysql_monitor_workers_count | workers, auxiliary | threads |\n| proxysql.mysql_monitor_workers_rate | started | workers/s |\n| proxysql.mysql_monitor_connect_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_ping_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_read_only_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_replication_lag_checks_rate | succeed, failed | checks/s |\n| proxysql.jemalloc_memory_used | active, allocated, mapped, metadata, resident, retained | B |\n| proxysql.memory_used | auth, sqlite3, query_digest, query_rules, firewall_users_table, firewall_users_config, firewall_rules_table, firewall_rules_config, mysql_threads, admin_threads, cluster_threads | B |\n| proxysql.uptime | uptime | seconds |\n\n### Per command\n\nThese metrics refer to the SQL command.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| command | SQL command. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.mysql_command_execution_rate | uptime | seconds |\n| proxysql.mysql_command_execution_time | time | microseconds |\n| proxysql.mysql_command_execution_duration | 100us, 500us, 1ms, 5ms, 10ms, 50ms, 100ms, 500ms, 1s, 5s, 10s, +Inf | microseconds |\n\n### Per user\n\nThese metrics refer to the user.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| user | username from the mysql_users table |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.mysql_user_connections_utilization | used | percentage |\n| proxysql.mysql_user_connections_count | used | connections |\n\n### Per hostgroup\n\nThese metrics refer to the backends hostgroup.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| hostgroup | hostgroup identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.hostgroup_backends_status | online, shunned, offline_soft, offline_hard | backends |\n\n### Per backend\n\nThese metrics refer to the backend server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| hostgroup | backend server hostgroup |\n| host | backend server host |\n| port | backend server port |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.backend_status | online, shunned, offline_soft, offline_hard | status |\n| proxysql.backend_connections_usage | free, used | connections |\n| proxysql.backend_connections_rate | succeed, failed | connections/s |\n| proxysql.backend_queries_rate | queries | queries/s |\n| proxysql.backend_traffic | recv, send | B/s |\n| proxysql.backend_latency | latency | microseconds |\n\n",integration_type:"collector",id:"go.d.plugin-proxysql-ProxySQL",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/proxysql/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-pulsar",plugin_name:"go.d.plugin",module_name:"pulsar",monitored_instance:{name:"Apache Pulsar",link:"https://pulsar.apache.org/",icon_filename:"pulsar.svg",categories:["data-collection.databases"]},keywords:["pulsar"],related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"}]}},info_provided_to_referring_integrations:{description:""}},overview:'# Apache Pulsar\n\nPlugin: go.d.plugin\nModule: pulsar\n\n## Overview\n\nThis collector monitors Pulsar servers.\n\n\nIt collects broker statistics using Pulsar\'s [Prometheus endpoint](https://pulsar.apache.org/docs/en/deploy-monitoring/#broker-stats).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nApache Pulsar can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Pulsar instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **pulsar** collector in two ways:\n\n| Method                | Best for                                               
                                   | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **pulsar**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/pulsar.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 60 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8080/metrics | yes |\n|  | timeout | HTTP request timeout (seconds). | 5 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **pulsar** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the pulsar data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _pulsar_ (or scroll the list) to locate the **pulsar** collector.\n5. Click the **+** next to the **pulsar** collector to add a new job.\n6. 
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/pulsar.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pulsar.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8080/metrics\n\n```\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8080/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:8080/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8080/metrics\n\n  - name: remote\n    url: http://192.0.2.1:8080/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `pulsar` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m pulsar\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m pulsar -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pulsar` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pulsar\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep pulsar /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pulsar\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n- topic_* metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.\n- subscription_* and namespace_subscription metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.\n- replication_* and namespace_replication_* metrics are available when replication is configured and `replicationMetricsEnabled` is set to true.\n\n\n### Per Apache Pulsar instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pulsar.broker_components | namespaces, topics, subscriptions, producers, consumers | components |\n| pulsar.messages_rate | publish, dispatch | messages/s |\n| pulsar.throughput_rate | publish, dispatch | KiB/s |\n| pulsar.storage_size | used | KiB |\n| pulsar.storage_operations_rate | read, write | message batches/s |\n| pulsar.msg_backlog | backlog | messages |\n| pulsar.storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |\n| pulsar.entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |\n| pulsar.subscription_delayed | delayed | message batches |\n| pulsar.subscription_msg_rate_redeliver | redelivered | messages/s |\n| pulsar.subscription_blocked_on_unacked_messages | blocked | subscriptions |\n| pulsar.replication_rate | in, out | messages/s |\n| pulsar.replication_throughput_rate | in, out | KiB/s |\n| pulsar.replication_backlog | backlog | messages |\n\n### Per namespace\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pulsar.namespace_broker_components | topics, subscriptions, producers, consumers | components |\n| pulsar.namespace_messages_rate | publish, dispatch | messages/s |\n| pulsar.namespace_throughput_rate | publish, dispatch | KiB/s |\n| pulsar.namespace_storage_size | used | KiB |\n| pulsar.namespace_storage_operations_rate | read, write | message batches/s |\n| pulsar.namespace_msg_backlog | backlog | messages |\n| pulsar.namespace_storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |\n| pulsar.namespace_entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |\n| pulsar.namespace_subscription_delayed | delayed | message batches |\n| 
pulsar.namespace_subscription_msg_rate_redeliver | redelivered | messages/s |\n| pulsar.namespace_subscription_blocked_on_unacked_messages | blocked | subscriptions |\n| pulsar.namespace_replication_rate | in, out | messages/s |\n| pulsar.namespace_replication_throughput_rate | in, out | KiB/s |\n| pulsar.namespace_replication_backlog | backlog | messages |\n| pulsar.topic_producers | a dimension per topic | producers |\n| pulsar.topic_subscriptions | a dimension per topic | subscriptions |\n| pulsar.topic_consumers | a dimension per topic | consumers |\n| pulsar.topic_messages_rate_in | a dimension per topic | publishes/s |\n| pulsar.topic_messages_rate_out | a dimension per topic | dispatches/s |\n| pulsar.topic_throughput_rate_in | a dimension per topic | KiB/s |\n| pulsar.topic_throughput_rate_out | a dimension per topic | KiB/s |\n| pulsar.topic_storage_size | a dimension per topic | KiB |\n| pulsar.topic_storage_read_rate | a dimension per topic | message batches/s |\n| pulsar.topic_storage_write_rate | a dimension per topic | message batches/s |\n| pulsar.topic_msg_backlog | a dimension per topic | messages |\n| pulsar.topic_subscription_delayed | a dimension per topic | message batches |\n| pulsar.topic_subscription_msg_rate_redeliver | a dimension per topic | messages/s |\n| pulsar.topic_subscription_blocked_on_unacked_messages | a dimension per topic | blocked subscriptions |\n| pulsar.topic_replication_rate_in | a dimension per topic | messages/s |\n| pulsar.topic_replication_rate_out | a dimension per topic | messages/s |\n| pulsar.topic_replication_throughput_rate_in | a dimension per topic | messages/s |\n| pulsar.topic_replication_throughput_rate_out | a dimension per topic | messages/s |\n| pulsar.topic_replication_backlog | a dimension per topic | messages |\n\n",integration_type:"collector",id:"go.d.plugin-pulsar-Apache_Pulsar",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/pulsar/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-puppet",plugin_name:"go.d.plugin",module_name:"puppet",monitored_instance:{name:"Puppet",link:"https://www.puppet.com/",categories:["data-collection.cloud-and-devops"],icon_filename:"puppet.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["puppet"]},overview:"# Puppet\n\nPlugin: go.d.plugin\nModule: puppet\n\n## Overview\n\nThis collector monitors Puppet metrics, including JVM heap and non-heap memory, CPU usage, and file descriptors.\n\n\nIt uses Puppet's metrics API endpoint [/status/v1/services](https://www.puppet.com/docs/puppetserver/5.3/status-api/v1/services.html) to gather the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Puppet instances running on localhost that are listening on port 8140.\nOn startup, it tries to collect metrics from:\n\n- https://127.0.0.1:8140\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **puppet** collector in two ways:\n\n| Method                | Best for                                                           
                       | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **puppet**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/puppet.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | https://127.0.0.1:8140 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **puppet** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the puppet data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _puppet_ (or scroll the list) to locate the **puppet** collector.\n5. Click the **+** next to the **puppet** collector to add a new job.\n6. 
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/puppet.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/puppet.conf\n```\n\n##### Examples\n\n###### Basic with self-signed certificate\n\nPuppet with self-signed TLS certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:8140\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:8140\n    tls_skip_verify: yes\n\n  - name: remote\n    url: https://192.0.2.1:8140\n    tls_skip_verify: yes\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `puppet` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m puppet\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m puppet -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `puppet` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
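\n- **Query the status endpoint directly** to separate collector-side problems from Puppet Server problems. A hedged sketch, assuming the default endpoint from this page and a self-signed certificate (drop `-k` if the certificate is trusted):\n\n  ```bash\n  # Should return JSON service statuses; an HTTP or TLS error here points at\n  # Puppet Server rather than at the collector.\n  curl -k https://127.0.0.1:8140/status/v1/services\n  ```\n\n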
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep puppet\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep puppet /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep puppet\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Puppet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| puppet.jvm_heap | committed, used | MiB |\n| puppet.jvm_nonheap | committed, used | MiB |\n| puppet.cpu | execution, GC | percentage |\n| puppet.fdopen | used | descriptors |\n\n",integration_type:"collector",id:"go.d.plugin-puppet-Puppet",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/puppet/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-rabbitmq",plugin_name:"go.d.plugin",module_name:"rabbitmq",monitored_instance:{name:"RabbitMQ",link:"https://www.rabbitmq.com/",icon_filename:"rabbitmq.svg",categories:["data-collection.databases"]},keywords:["rabbitmq","message brokers"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# RabbitMQ\n\nPlugin: go.d.plugin\nModule: rabbitmq\n\n## Overview\n\nThis collector monitors RabbitMQ instances.\n\nIt collects data using an HTTP-based API provided by the [management plugin](https://www.rabbitmq.com/management.html).\nThe following endpoints are used:\n\n- `/api/definitions` (one-time retrieval, used to obtain the cluster ID and name)\n- `/api/overview`\n- `/api/nodes`\n- `/api/vhosts`\n- `/api/queues` (disabled by default)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **rabbitmq** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 
 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **rabbitmq**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/rabbitmq.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable the management plugin\n\nThe management plugin is included in the RabbitMQ distribution but is disabled by default.\nTo enable it, see the [Management Plugin](https://www.rabbitmq.com/management.html#getting-started) documentation.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://localhost:15672 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **Metrics Selection** | collect_queues_metrics | Collect per-vhost, per-queue metrics. May cause significant overhead if many queues exist. | no | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. | guest | no |\n|  | password | Password for Basic HTTP authentication. | guest | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **rabbitmq** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the rabbitmq data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. 
In the Search box, type _rabbitmq_ (or scroll the list) to locate the **rabbitmq** collector.\n5. Click the **+** next to the **rabbitmq** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/rabbitmq.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rabbitmq.conf\n```\n\n##### Examples\n\n###### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:15672\n\n```\n{% /details %}\n###### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:15672\n    username: admin\n    password: password\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:15672\n\n  - name: remote\n    url: http://192.0.2.0:15672\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `rabbitmq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m rabbitmq\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m rabbitmq -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `rabbitmq` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
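\n- **Probe the management API directly** to rule out collector-side problems. A hedged sketch, assuming the default endpoint and the default guest credentials from the options table above:\n\n  ```bash\n  # A JSON document means the management plugin is up; HTTP 401 means wrong\n  # credentials, and connection refused usually means the plugin is disabled.\n  curl -u guest:guest http://localhost:15672/api/overview\n\n  # If the plugin is disabled, enable it (it ships with RabbitMQ):\n  rabbitmq-plugins enable rabbitmq_management\n  ```\n\n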
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep rabbitmq\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep rabbitmq /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep rabbitmq\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ rabbitmq_node_avail_status_down ](https://github.com/netdata/netdata/blob/master/src/health/health.d/rabbitmq.conf) | rabbitmq.node_avail_status | RabbitMQ node is down (node ${label:node} cluster ${label:cluster_id}) |\n| [ rabbitmq_node_network_partition_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/rabbitmq.conf) | rabbitmq.node_network_partition_status | RabbitMQ network partition detected (node ${label:node} cluster ${label:cluster_id}) |\n| [ rabbitmq_node_mem_alarm_status_triggered ](https://github.com/netdata/netdata/blob/master/src/health/health.d/rabbitmq.conf) | rabbitmq.node_mem_alarm_status | RabbitMQ mem alarm triggered (node ${label:node} cluster ${label:cluster_id}) |\n| [ rabbitmq_node_disk_free_alarm_status_triggered ](https://github.com/netdata/netdata/blob/master/src/health/health.d/rabbitmq.conf) | rabbitmq.node_disk_free_alarm_status | RabbitMQ disk free alarm triggered (node ${label:node} cluster ${label:cluster_id}) |\n| [ rabbitmq_vhost_status_unhealthy ](https://github.com/netdata/netdata/blob/master/src/health/health.d/rabbitmq.conf) | rabbitmq.vhost_status | RabbitMQ vhost is not healthy (vhost ${label:vhost} cluster ${label:cluster_id}) |\n| [ rabbitmq_queue_status_minority ](https://github.com/netdata/netdata/blob/master/src/health/health.d/rabbitmq.conf) | rabbitmq.queue_status | RabbitMQ queue insufficient online members (queue ${label:queue} node ${label:node} cluster ${label:cluster_id}) |\n| [ rabbitmq_queue_status_unhealthy ](https://github.com/netdata/netdata/blob/master/src/health/health.d/rabbitmq.conf) | rabbitmq.queue_status | RabbitMQ queue is unhealthy (queue ${label:queue} node ${label:node} cluster ${label:cluster_id}) |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cluster\n\nThese metrics refer to the RabbitMQ Cluster.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cluster_id | Unique identifier for the cluster, automatically assigned by RabbitMQ. |\n| cluster_name | User-defined name of the cluster as set using `rabbitmqctl set_cluster_name`. If not set, it will be "unset". 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.messages_count | ready, unacknowledged | messages |\n| rabbitmq.messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_empty, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n| rabbitmq.objects_count | channels, consumers, connections, queues, exchanges | objects |\n| rabbitmq.connection_churn_rate | created, closed | operations/s |\n| rabbitmq.channel_churn_rate | created, closed | operations/s |\n| rabbitmq.queue_churn_rate | created, deleted, declared | operations/s |\n\n### Per node\n\nThese metrics refer to the RabbitMQ node.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cluster_id | Unique identifier for the cluster, automatically assigned by RabbitMQ. |\n| cluster_name | User-defined name of the cluster as set using `rabbitmqctl set_cluster_name <NAME>`. If not set, it will be "unset". |\n| node | Name of the node. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.node_avail_status | running, down | status |\n| rabbitmq.node_network_partition_status | clear, detected | status |\n| rabbitmq.node_mem_alarm_status | clear, triggered | status |\n| rabbitmq.node_disk_free_alarm_status | clear, triggered | status |\n| rabbitmq.node_file_descriptors_usage | used | fd |\n| rabbitmq.node_sockets_usage | used | sockets |\n| rabbitmq.node_erlang_processes_usage | used | processes |\n| rabbitmq.node_erlang_run_queue_processes_count | length | processes |\n| rabbitmq.node_memory_usage | used | bytes |\n| rabbitmq.node_disk_space_free_size | free | bytes |\n| rabbitmq.node_uptime | uptime | seconds |\n\n### Per cluster peer\n\nThese metrics refer to the RabbitMQ cluster peer.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cluster_id | Unique identifier for the cluster, automatically assigned by RabbitMQ. |\n| cluster_name | User-defined name of the cluster as set using `rabbitmqctl set_cluster_name <NAME>`. If not set, it will be "unset". |\n| node | Name of the node. |\n| peer | Name of the remote node in the cluster. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.node_peer_cluster_link_traffic | received, sent | bytes/s |\n\n### Per vhost\n\nThese metrics refer to the virtual host.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cluster_id | Unique identifier for the cluster, automatically assigned by RabbitMQ. |\n| cluster_name | User-defined name of the cluster as set using `rabbitmqctl set_cluster_name <NAME>`. If not set, it will be "unset". |\n| vhost | Name of the virtual host. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.vhost_status | running, stopped, partial | status |\n| rabbitmq.vhost_messages_count | ready, unacknowledged | messages |\n| rabbitmq.vhost_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n\n### Per queue\n\nThese metrics refer to the virtual host queue.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cluster_id | Unique identifier for the cluster, automatically assigned by RabbitMQ. |\n| cluster_name | User-defined name of the cluster as set using `rabbitmqctl set_cluster_name <NAME>`. If not set, it will be "unset". 
|\n| node | Name of the node. |\n| vhost | Name of the virtual host. |\n| queue | Name of the queue. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.queue_status | running, down, idle, crashed, stopped, minority, terminated | status |\n| rabbitmq.queue_messages_count | ready, unacknowledged, paged_out, persistent | messages |\n| rabbitmq.queue_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n\n',integration_type:"collector",id:"go.d.plugin-rabbitmq-RabbitMQ",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/rabbitmq/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-redis",plugin_name:"go.d.plugin",module_name:"redis",monitored_instance:{name:"Redis",link:"https://redis.com/",categories:["data-collection.databases"],icon_filename:"redis.svg"},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Containers"}]}},alternative_monitored_instances:[],info_provided_to_referring_integrations:{description:""},keywords:["redis","databases"]},overview:'# Redis\n\nPlugin: go.d.plugin\nModule: redis\n\n## Overview\n\nThis collector monitors the health and performance of Redis servers and collects general statistics, CPU and memory consumption, replication information, command statistics, and more.\n\n\nIt connects to the Redis instance via a TCP or UNIX socket and executes the following commands:\n\n- [INFO ALL](https://redis.io/commands/info)\n- [PING](https://redis.io/commands/ping/)\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nRedis can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Containers" %}Containers{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect using known Redis TCP and UNIX sockets:\n\n- 127.0.0.1:6379\n- /tmp/redis.sock\n- /var/run/redis/redis.sock\n- /var/lib/redis/redis.sock\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:"## Setup\n\n\nYou can configure the **redis** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **redis**, then click **+** to add a job. 
|\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/redis.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 5 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | address | Redis server address (TCP or Unix socket). | redis://@localhost:6379 | yes |\n|  | timeout | Dial, read, and write timeout (seconds). | 1 | no |\n| **Auth** | username | Username for authentication. |  | no |\n|  | password | Password for authentication. |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Functions** | functions.top_queries.disabled | Disable the [top-queries](#top-queries) function. | no | no |\n|  | functions.top_queries.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.top_queries.limit | Maximum number of queries to return. | 500 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **redis** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the redis data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _redis_ (or scroll the list) to locate the **redis** collector.\n5. Click the **+** next to the **redis** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/redis.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/redis.conf\n```\n\n##### Examples\n\n###### TCP socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 'redis://@127.0.0.1:6379'\n\n```\n{% /details %}\n###### Unix socket\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 'unix://@/tmp/redis.sock'\n\n```\n{% /details %}\n###### TCP socket with password\n\nAn example configuration.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 'redis://:password@127.0.0.1:6379'\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    address: 'redis://:password@127.0.0.1:6379'\n\n  - name: remote\n    address: 'redis://user:password@203.0.113.0:6379'\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `redis` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m redis\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m redis -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `redis` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep redis\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep redis /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
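A hedged one-liner to narrow the view to recent entries, assuming the default log path:\n\n```bash\n# Show only the last 200 lines before filtering, so earlier restarts do not\n# drown out the current one.\ntail -n 200 /var/log/netdata/collector.log | grep redis\n```\n\n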
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep redis\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ redis_connections_rejected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.connections | connections rejected because of maxclients limit in the last minute |\n| [ redis_bgsave_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_now | duration of the on-going RDB save operation |\n| [ redis_bgsave_broken ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_health | status of the last RDB save operation (0: ok, 1: error) |\n| [ redis_master_link_down ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.master_link_down_since_time | time elapsed since the link between master and slave is down |\n",functions:"## Functions\n\nThis collector exposes real-time functions for interactive troubleshooting in the Live tab.\n\n\n### Top Queries\n\nRetrieves slow command entries from Redis [SLOWLOG](https://redis.io/docs/latest/commands/slowlog/).\n\nThis function executes the `SLOWLOG GET` command to retrieve entries of commands that exceeded the configured execution time threshold (`slowlog-log-slower-than`). It provides command details, execution duration, and client information for each slow command.\n\nUse cases:\n- Identify slow commands that may need optimization\n- Analyze command patterns to detect performance hotspots\n- Investigate client sources of slow commands\n\nCommand text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Redis:top-queries` |\n| Require Cloud | yes |\n| Performance | Executes `SLOWLOG GET` command to retrieve entries from Redis memory:<br/>\u2022 Minimal overhead as SLOWLOG is stored in memory<br/>\u2022 Default limit of 500 entries balances completeness with performance<br/>\u2022 Large slowlogs with many entries may take slightly longer to transfer |\n| Security | Command arguments may contain unmasked literal values including potentially sensitive data:<br/>\u2022 Redis keys and values in command arguments<br/>\u2022 Application-specific identifiers or session tokens<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to Redis<br/>\u2022 SLOWLOG is enabled (`slowlog-log-slower-than` > 0)<br/>\u2022 Returns HTTP 503 if collector is still initializing<br/>\u2022 Returns HTTP 500 if the command fails<br/>\u2022 Returns HTTP 504 if the command times out |\n\n#### Prerequisites\n\nNo additional configuration is required.\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. Options include duration, timestamp, ID, and command name. Defaults to duration to focus on slowest commands. | yes | duration |  |\n\n#### Returns\n\nSlowlog entries with command timing and client metadata, providing insight into Redis performance patterns. 
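To cross-check this function against the server directly, you can read the same slowlog with `redis-cli`; a hedged sketch, assuming a local instance and default settings:\n\n```bash\n# The threshold is configured in microseconds; a value of -1 disables the slowlog.\nredis-cli CONFIG GET slowlog-log-slower-than\n\n# Raw view of the 10 newest slowlog entries that this function renders.\nredis-cli SLOWLOG GET 10\n```\n\n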
Each row represents a single slow command execution that exceeded the configured threshold.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| ID | integer |  | hidden | Unique identifier for the slowlog entry. Allows tracking individual command executions. |\n| Timestamp | timestamp |  |  | Date and time when the slow command was executed. Useful for correlating slow commands with application events or system changes. |\n| Command | string |  |  | Full command text including all arguments. May contain sensitive data (keys, values) depending on application implementation. Truncated to 4096 characters. |\n| Command Name | string |  |  | The Redis command name (e.g., SET, GET, HGETALL, ZADD). Useful for grouping and analyzing slow commands by type. |\n| Duration | duration | milliseconds |  | Execution time that exceeded the slowlog threshold. Higher values indicate slower commands that may need optimization or investigation. |\n| Client Address | string |  | hidden | IP address of the client that executed the slow command. Useful for identifying problematic clients or network segments. |\n| Client Name | string |  | hidden | Client identifier or name reported by Redis. Useful for identifying specific applications or services generating slow commands. |\n\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Redis instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| redis.connections | accepted, rejected | connections/s |\n| redis.clients | connected, blocked, tracking, in_timeout_table | clients |\n| redis.ping_latency | min, max, avg | seconds |\n| redis.commands | processes | commands/s |\n| redis.keyspace_lookup_hit_rate | lookup_hit_rate | percentage |\n| redis.memory | max, used, rss, peak, dataset, lua, scripts | bytes |\n| redis.mem_fragmentation_ratio | mem_fragmentation | ratio |\n| redis.key_eviction_events | evicted | keys/s |\n| redis.net | received, sent | kilobits/s |\n| redis.rdb_changes | changes | operations |\n| redis.bgsave_now | current_bgsave_time | seconds |\n| redis.bgsave_health | last_bgsave | status |\n| redis.bgsave_last_rdb_save_since_time | last_bgsave_time | seconds |\n| redis.aof_file_size | current, base | bytes |\n| redis.commands_calls | a dimension per command | calls |\n| redis.commands_usec | a dimension per command | microseconds |\n| redis.commands_usec_per_sec | a dimension per command | microseconds/s |\n| redis.key_expiration_events | expired | keys/s |\n| redis.database_keys | a dimension per database | keys |\n| redis.database_expires_keys | a dimension per database | keys |\n| redis.connected_replicas | connected | replicas |\n| redis.master_link_status | up, down | status |\n| redis.master_last_io_since_time | time | seconds |\n| redis.master_link_down_since_time | time | seconds |\n| redis.uptime | uptime | seconds 
|\n\n",integration_type:"collector",id:"go.d.plugin-redis-Redis",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/redis/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-rethinkdb",plugin_name:"go.d.plugin",module_name:"rethinkdb",monitored_instance:{name:"RethinkDB",link:"https://rethinkdb.com",categories:["data-collection.databases"],icon_filename:"rethinkdb.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["rethinkdb","database","db"]},overview:"# RethinkDB\n\nPlugin: go.d.plugin\nModule: rethinkdb\n\n## Overview\n\nIt collects cluster-wide metrics such as server status, client connections, active clients, query rate, and document read/write rates.\nFor each server, it offers similar metrics.\n\n\nThe data is gathered by querying the stats table in RethinkDB, which stores real-time statistics related to the cluster and its individual servers.\nIt also provides a `running-queries` function using the `rethinkdb.jobs` system table (admin-only).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, collector will attempt to connect to RethinkDB instance on `127.0.0.1:28015` address.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **rethinkdb** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **rethinkdb**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/rethinkdb.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection frequency (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | address | RethinkDB server address (IP:PORT). | 127.0.0.1:28015 | yes |\n|  | timeout | Connection, read, and write timeout duration (seconds). Includes name resolution. 
| 1 | no |\n| **Auth** | username | Username for authentication. |  | no |\n|  | password | Password for authentication. |  | no |\n| **Functions** | functions.running_queries.disabled | Disable the [running-queries](#running-queries) function. | no | no |\n|  | functions.running_queries.timeout | Timeout for the running-queries function query (seconds). If not set, uses the collector\'s timeout. | (collector timeout) | no |\n|  | functions.running_queries.limit | Maximum number of rows returned by the running-queries function. | 500 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **rethinkdb** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the rethinkdb data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _rethinkdb_ (or scroll the list) to locate the **rethinkdb** collector.\n5. Click the **+** next to the **rethinkdb** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/rethinkdb.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rethinkdb.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:28015\n\n```\n{% /details %}\n###### With authentication\n\nAn example configuration with authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:28015\n    username: name\n    password: pass\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:28015\n\n  - name: remote\n    address: 203.0.113.0:28015\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `rethinkdb` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m rethinkdb\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m rethinkdb -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `rethinkdb` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep rethinkdb\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep rethinkdb /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep rethinkdb\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",functions:"## Functions\n\nThis collector exposes real-time functions for interactive troubleshooting in the Live tab.\n\n\n### Running Queries\n\nRetrieves currently executing queries from the RethinkDB [rethinkdb.jobs](https://rethinkdb.com/docs/system-jobs/) system table.\n\nThis function queries the `rethinkdb.jobs` system table which contains information about background tasks and queries currently running on the cluster. 
It provides query text, execution duration, client information, and involved servers.\n\nUse cases:\n- Identify long-running queries that may be blocking resources\n- Monitor active query load across the cluster\n- Investigate client connections generating heavy workloads\n\nQuery text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Rethinkdb:running-queries` |\n| Require Cloud | yes |\n| Performance | Queries the `rethinkdb.jobs` system table:<br/>\u2022 Minimal overhead as it reads from an in-memory system table<br/>\u2022 Default limit of 500 rows balances completeness with performance<br/>\u2022 Returns only currently active queries (no historical data) |\n| Security | Query text may contain unmasked literal values including potentially sensitive data:<br/>\u2022 Document field values in filter conditions<br/>\u2022 User-provided data in insert/update operations<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to RethinkDB<br/>\u2022 The user has admin access to `rethinkdb.jobs` table<br/>\u2022 Returns HTTP 503 if collector is still initializing<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Grant admin access to `rethinkdb.jobs`\n\nThe user must have admin privileges to query the `rethinkdb.jobs` system table.\n\n1. Connect with an admin user account that has access to system tables\n\n2. Verify access to `rethinkdb.jobs`:\n\n   ```javascript\n   r.db('rethinkdb').table('jobs').run(conn)\n   ```\n\n:::info\n\n- The `rethinkdb.jobs` table is only accessible to admin users\n- Non-admin users will receive a permission error when attempting to query this table\n- The collector's regular metrics do not require admin access\n\n:::\n\n\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. Defaults to duration to focus on longest-running queries. | yes | durationMs |  |\n\n#### Returns\n\nCurrently running queries from the `rethinkdb.jobs` system table. Each row represents a single active query with its execution context.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Job ID | string |  | hidden | Unique identifier for the job entry. Can be used to track or kill specific queries. |\n| Query | string |  |  | The ReQL query text being executed. Truncated to 4096 characters. May contain literal values from application code. |\n| Duration | duration | milliseconds |  | Time elapsed since the query started executing. High values indicate long-running queries that may need investigation. |\n| Type | string |  |  | Job type (e.g., query, index_construction, disk_compaction). Useful for distinguishing user queries from background tasks. |\n| User | string |  |  | RethinkDB user account that initiated the query. Useful for identifying workload by user or application. |\n| Client Address | string |  | hidden | IP address of the client connection that submitted the query. |\n| Client Port | integer |  | hidden | Port number of the client connection. |\n| Servers | string |  | hidden | Comma-separated list of servers involved in executing this query. 
|\n\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per RethinkDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rethinkdb.cluster_servers_stats_request | success, timeout | servers |\n| rethinkdb.cluster_client_connections | connections | connections |\n| rethinkdb.cluster_active_clients | active | clients |\n| rethinkdb.cluster_queries | queries | queries/s |\n| rethinkdb.cluster_documents | read, written | documents/s |\n\n### Per server\n\nThese metrics refer to the server (cluster member).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| server_uuid | Server UUID. |\n| server_name | Server name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rethinkdb.server_stats_request_status | success, timeout | status |\n| rethinkdb.server_client_connections | connections | connections |\n| rethinkdb.server_active_clients | active | clients |\n| rethinkdb.server_queries | queries | queries/s |\n| rethinkdb.server_documents | read, written | documents/s |\n\n",integration_type:"collector",id:"go.d.plugin-rethinkdb-RethinkDB",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/rethinkdb/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-riakkv",plugin_name:"go.d.plugin",module_name:"riakkv",monitored_instance:{name:"Riak KV",link:"https://riak.com/products/riak-kv/index.html",categories:["data-collection.databases"],icon_filename:"riak.svg"},related_resources:{integrations:{list:[]}},alternative_monitored_instances:[],info_provided_to_referring_integrations:{description:""},keywords:["database","nosql","big data"]},overview:"# Riak KV\n\nPlugin: go.d.plugin\nModule: riakkv\n\n## Overview\n\nThis collector monitors RiakKV metrics about throughput, latency, resources and more.\n\n\nIt sends HTTP requests to the Riak [/stats](https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status.1.html) endpoint.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Riak instances running on localhost that are listening on port 8098.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:8098/stats\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **riakkv** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                   
                      | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **riakkv**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/riakkv.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable /stats endpoint\n\nSee the RiakKV [configuration reference](https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status.1.html).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 2 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8098/stats | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **riakkv** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the riakkv data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _riakkv_ (or scroll the list) to locate the **riakkv** collector.\n5. Click the **+** next to the **riakkv** collector to add a new job.\n6. 
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/riakkv.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/riakkv.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8098/stats\n\n```\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8098/stats\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nRiak KV with HTTPS enabled and a self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:8098/stats\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8098/stats\n\n  - name: remote\n    url: http://192.0.2.1:8098/stats\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `riakkv` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m riakkv\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m riakkv -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `riakkv` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
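\n- **Fetch the stats endpoint directly** to check that Riak is serving metrics at all. A hedged sketch, assuming the default endpoint from this page:\n\n  ```bash\n  # Should return a large JSON document; an error here points at Riak or the\n  # /stats endpoint configuration rather than at the collector.\n  curl http://127.0.0.1:8098/stats\n  ```\n\n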
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep riakkv\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep riakkv /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep riakkv\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Riak KV instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| riak.kv.throughput | gets, puts | operations/s |\n| riak.dt.vnode_updates | counters, sets, maps | operations/s |\n| riak.search | queries | queries/s |\n| riak.search.documents | indexed | documents/s |\n| riak.consistent.operations | gets, puts | operations/s |\n| riak.kv.latency.get | mean, median, 95, 99, 100 | ms |\n| riak.kv.latency.put | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.counter_merge | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.set_merge | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.map_merge | mean, median, 95, 99, 100 | ms |\n| riak.search.latency.query | median, min, 95, 99, 999, max | ms |\n| riak.search.latency.index | median, min, 95, 99, 999, max | ms |\n| riak.consistent.latency.get | mean, median, 95, 99, 100 | ms |\n| riak.consistent.latency.put | mean, median, 95, 99, 100 | ms |\n| riak.vm | processes | total |\n| riak.vm.memory.processes | allocated, used | MB |\n| riak.kv.siblings_encountered.get | mean, median, 95, 99, 100 | siblings |\n| riak.kv.objsize.get | mean, median, 95, 99, 100 | KB |\n| riak.search.vnodeq_size | mean, median, 95, 99, 100 | messages |\n| riak.search.index | index_fail, bad_entry, extract_fail | errors |\n| riak.core.protobuf_connections | active | connections |\n| riak.core.repairs | read | repairs |\n| riak.core.fsm_active | get, put, secondary index, list keys | fsms |\n| riak.core.fsm_rejected | get, put | fsms |\n\n",integration_type:"collector",id:"go.d.plugin-riakkv-Riak_KV",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/riakkv/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-rspamd",plugin_name:"go.d.plugin",module_name:"rspamd",monitored_instance:{name:"Rspamd",link:"https://rspamd.com/",categories:["data-collection.applications"],icon_filename:"globe.svg"},related_resources:{integrations:{list:[{plugin_name:"go.d.plugin",module_name:"httpcheck"},{plugin_name:"apps.plugin",module_name:"apps"}]}},alternative_monitored_instances:[],info_provided_to_referring_integrations:{description:""},keywords:["spam","rspamd","email"]},overview:'# Rspamd\n\nPlugin: go.d.plugin\nModule: rspamd\n\n## Overview\n\nThis collector monitors the 
activity and performance of Rspamd servers. It gathers various metrics including scanned emails, learned messages, spam/ham counts, and actions taken on emails (reject, rewrite, etc.).\n\n\nIt retrieves statistics from Rspamd\'s [built-in web server](https://rspamd.com/doc/workers/controller.html) by making HTTP requests to the `/stat` endpoint.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nRspamd can be monitored further using the following other integrations:\n\n- {% relatedResource id="go.d.plugin-httpcheck-HTTP_Endpoints" %}HTTP Endpoints{% /relatedResource %}\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Rspamd instances running on localhost that are listening on port 11334.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **rspamd** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **rspamd**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/rspamd.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:11334 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). 
|  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **rspamd** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the rspamd data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _rspamd_ (or scroll the list) to locate the **rspamd** collector.\n5. Click the **+** next to the **rspamd** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/rspamd.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rspamd.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:11334\n\n```\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:11334\n    username: username\n    password: password\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:11334\n\n  - name: remote\n    url: http://192.0.2.1:11334\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `rspamd` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m rspamd\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m rspamd -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `rspamd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep rspamd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep rspamd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep rspamd\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Rspamd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rspamd.classifications | ham, spam | messages/s |\n| rspamd.actions | reject, soft_reject, rewrite_subject, add_header, greylist, custom, discard, quarantine, no_action | messages/s |\n| rspamd.scans | scanned | messages/s |\n| rspamd.learns | learned | messages/s |\n| rspamd.connections | connections | connections/s |\n| rspamd.control_connections | control_connections | connections/s |\n\n",integration_type:"collector",id:"go.d.plugin-rspamd-Rspamd",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/rspamd/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-samba",plugin_name:"go.d.plugin",module_name:"samba",monitored_instance:{name:"Samba",link:"https://www.samba.org/samba/",icon_filename:"samba.svg",categories:["data-collection.storage"]},keywords:["samba","smb","file sharing"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Samba\n\nPlugin: go.d.plugin\nModule: samba\n\n## Overview\n\nThis collector monitors Samba syscalls and SMB2 calls. It relies on the [`smbstatus`](https://www.samba.org/samba/docs/current/man-html/smbstatus.1.html) CLI tool but avoids directly executing the binary. 
Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\nExecuted commands:\n- `smbstatus -P`\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **samba** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **samba**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/samba.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Verifying and Enabling Profiling for SMBd\n\n1. **Check for Profiling Support**\n\n    Before enabling profiling, it\'s important to verify if `smbd` was compiled with profiling capabilities. Run the following command as root user (using `sudo`) to check:\n\n    ```bash\n    $ sudo smbd --build-options | grep WITH_PROFILE\n    WITH_PROFILE\n    ```\n\n    If the command outputs `WITH_PROFILE`, profiling is supported. If not, you\'ll need to recompile `smbd` with profiling enabled (refer to Samba documentation for specific instructions).\n\n2. **Enable Profiling**\n\n    Once you\'ve confirmed profiling support, you can enable it using one of the following methods:\n\n    - **Command-Line Option**\n        Start smbd with the `-P 1` option when invoking it directly from the command line.\n    - **Configuration File**\n        Modify the `smb.conf` configuration file located at `/etc/samba/smb.conf` (the path might vary slightly depending on your system). Add the following line to the `[global]` section:\n\n        ```bash\n        smbd profiling level = count\n        ```\n3. **Restart the Samba service**\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | smbstatus binary execution timeout. 
| 2 | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **samba** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the samba data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _samba_ (or scroll the list) to locate the **samba** collector.\n5. Click the **+** next to the **samba** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/samba.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/samba.conf\n```\n\n##### Examples\n\n###### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: samba\n    update_every: 5  # Collect statistics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `samba` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m samba\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m samba -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `samba` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep samba\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep samba /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep samba\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per syscall\n\nThese metrics refer to the Syscall.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| syscall | Syscall name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| samba.syscall_calls | syscalls | calls/s |\n| samba.syscall_transferred_data | transferred | bytes/s |\n\n### Per smb2call\n\nThese metrics refer to the SMB2 Call.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| smb2call | SMB2 call name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| samba.smb2_call_calls | smb2 | calls/s |\n| samba.smb2_call_transferred_data | in, out | bytes/s |\n\n",integration_type:"collector",id:"go.d.plugin-samba-Samba",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/samba/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-scaleio",plugin_name:"go.d.plugin",module_name:"scaleio",monitored_instance:{name:"Dell EMC ScaleIO",link:"https://www.dell.com/en-ca/dt/storage/scaleio/scaleioreadynode.htm",icon_filename:"dell.svg",categories:["data-collection.storage"]},keywords:["scaleio"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Dell EMC ScaleIO\n\nPlugin: go.d.plugin\nModule: scaleio\n\n## Overview\n\nThis collector monitors ScaleIO (VxFlex OS) instances via VxFlex OS Gateway API.\n\nIt collects metrics for the following ScaleIO components:\n\n- System\n- Storage Pool\n- Sdc\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **scaleio** collector in two ways:\n\n| Method                | Best for                                                                                 | How to 
|\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **scaleio**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/scaleio.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | https://127.0.0.1 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}
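\n\nAs an illustrative sketch (the credentials below are placeholders), the per-job `update_every` option can be raised to query a busy Gateway less often:\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1\n    username: admin        # placeholder credentials\n    password: password\n    tls_skip_verify: yes   # self-signed certificate\n    update_every: 5        # query the Gateway API every 5 seconds instead of every 1\n```\n\n#### via UI\n\nConfigure the **scaleio** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the scaleio data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _scaleio_ (or scroll the list) to locate the **scaleio** collector.\n5. Click the **+** next to the **scaleio** collector to add a new job.\n6. 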
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/scaleio.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/scaleio.conf\n```\n\n##### Examples\n\n###### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1\n    username: admin\n    password: password\n    tls_skip_verify: yes  # self-signed certificate\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instance.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1\n    username: admin\n    password: password\n    tls_skip_verify: yes  # self-signed certificate\n\n  - name: remote\n    url: https://203.0.113.10\n    username: admin\n    password: password\n    tls_skip_verify: yes\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `scaleio` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m scaleio\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m scaleio -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `scaleio` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep scaleio\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep scaleio /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep scaleio\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dell EMC ScaleIO instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.system_capacity_total | total | KiB |\n| scaleio.system_capacity_in_use | in_use | KiB |\n| scaleio.system_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB |\n| scaleio.system_capacity_available_volume_allocation | available | KiB |\n| scaleio.system_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB |\n| scaleio.system_workload_primary_bandwidth_total | total | KiB/s |\n| scaleio.system_workload_primary_bandwidth | read, write | KiB/s |\n| scaleio.system_workload_primary_iops_total | total | iops/s |\n| scaleio.system_workload_primary_iops | read, write | iops/s |\n| scaleio.system_workload_primary_io_size_total | io_size | KiB |\n| scaleio.system_rebalance | read, write | KiB/s |\n| scaleio.system_rebalance_left | left | KiB |\n| scaleio.system_rebalance_time_until_finish | time | seconds |\n| scaleio.system_rebuild | read, write | KiB/s |\n| scaleio.system_rebuild_left | left | KiB |\n| scaleio.system_defined_components | devices, fault_sets, protection_domains, rfcache_devices, sdc, sds, snapshots, storage_pools, volumes, vtrees | components |\n| scaleio.system_components_volumes_by_type | thick, thin | volumes |\n| scaleio.system_components_volumes_by_mapping | mapped, unmapped | volumes |\n\n### Per storage pool\n\nThese metrics refer to the storage pool.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.storage_pool_capacity_total | total | KiB |\n| scaleio.storage_pool_capacity_in_use | in_use | KiB |\n| scaleio.storage_pool_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB |\n| scaleio.storage_pool_capacity_utilization | used | percentage |\n| scaleio.storage_pool_capacity_available_volume_allocation | available | KiB |\n| scaleio.storage_pool_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB |\n| scaleio.storage_pool_components | devices, snapshots, volumes, vtrees | components |\n\n### Per sdc\n\nThese metrics refer to the SDC (ScaleIO Data Client).\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| 
scaleio.sdc_mdm_connection_state | connected | boolean |\n| scaleio.sdc_bandwidth | read, write | KiB/s |\n| scaleio.sdc_iops | read, write | iops/s |\n| scaleio.sdc_io_size | read, write | KiB |\n| scaleio.sdc_num_of_mapped_volumed | mapped | volumes |\n\n",integration_type:"collector",id:"go.d.plugin-scaleio-Dell_EMC_ScaleIO",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/scaleio/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-sensors",plugin_name:"go.d.plugin",module_name:"sensors",monitored_instance:{name:"Linux Sensors",link:"https://hwmon.wiki.kernel.org/lm_sensors",icon_filename:"microchip.svg",categories:["data-collection.hardware-and-sensors"]},keywords:["sensors","temperature","voltage","current","power","fan","energy","humidity","intrusion"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Linux Sensors\n\nPlugin: go.d.plugin\nModule: sensors\n\n## Overview\n\nThis collector gathers real-time system sensor statistics using the [sysfs](https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface) interface.\n\nSupported sensors:\n\n- Temperature\n- Voltage\n- Fan\n- Current\n- Power\n- Energy\n- Humidity\n- Intrusion\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAutomatically discovers and exposes all available sensors on the system through the [sysfs](https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface) interface.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\nYou can configure the **sensors** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **sensors**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/sensors.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. 
| 10 | no |\n| relabel | A list used to update existing sensor labels or add labels to sensors that don't have them. | [] | no |\n| relabel[].chip | [Pattern](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns) to match the `chip_id` label value. |  | no |\n| relabel[].sensors | A list of sensors to be relabeled for the specified chip. | [] | no |\n| relabel[].sensors[].name | The exact sensor name (e.g., `'temp1'`, `'in1'`, `'voltage1'`). |  | no |\n| relabel[].sensors[].label | The new label value for the sensor. |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **sensors** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the sensors data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _sensors_ (or scroll the list) to locate the **sensors** collector.\n5. Click the **+** next to the **sensors** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/sensors.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/sensors.conf\n```\n\n##### Examples\n\n###### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: sensors\n    update_every: 5  # Collect sensors statistics every 5 seconds\n\n```\n{% /details %}\n###### Renaming labels\n\nAllows you to override/add labels.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: sensors\n    relabel:\n      - chip: as99127f-*\n        sensors:\n          - name: temp1\n            label: Mobo Temp\n          - name: temp2\n            label: CPU0 Temp\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `sensors` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m sensors\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m sensors -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `sensors` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep sensors\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep sensors /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep sensors\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor\n\nThese metrics refer to the system sensor.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| chip | The path to the sensor's chip device, excluding the /sys/devices prefix. This provides a unique identifier for the physical hardware component. |\n| chip_id | A unique identifier for the sensor's chip, formatted as `chipName-busType-hash`. |\n| sensor | The name of the specific sensor within the chip device. This provides a direct identifier for the individual measurement point. |\n| label | A label provided by the kernel driver to indicate the intended use or purpose of the sensor. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.chip_sensor_temperature | input | Celsius |\n| sensors.chip_sensor_temperature_alarm | clear, triggered | status |\n| sensors.chip_sensor_voltage | input | Volts |\n| sensors.chip_sensor_voltage_average | average | Volts |\n| sensors.chip_sensor_voltage_alarm | clear, triggered | status |\n| sensors.chip_sensor_fan | input | RPM |\n| sensors.chip_sensor_fan_alarm | clear, triggered | status |\n| sensors.chip_sensor_current | input | Amperes |\n| sensors.chip_sensor_current_average | average | Amperes |\n| sensors.chip_sensor_current_alarm | clear, triggered | status |\n| sensors.chip_sensor_power | input | Watts |\n| sensors.chip_sensor_power_average | average | Watts |\n| sensors.chip_sensor_power_alarm | clear, triggered | status |\n| sensors.chip_sensor_energy | input | Joules |\n| sensors.chip_sensor_humidity | input | percent |\n| sensors.chip_sensor_intrusion_alarm | clear, triggered | status |\n\n",integration_type:"collector",id:"go.d.plugin-sensors-Linux_Sensors",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/sensors/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-smartctl",plugin_name:"go.d.plugin",module_name:"smartctl",monitored_instance:{name:"S.M.A.R.T.",link:"https://linux.die.net/man/8/smartd",icon_filename:"smart.png",categories:["data-collection.storage"]},keywords:["smart","S.M.A.R.T.","SCSI devices","ATA devices"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# S.M.A.R.T.\n\nPlugin: go.d.plugin\nModule: smartctl\n\n## Overview\n\nThis collector monitors the health status of storage devices by analyzing S.M.A.R.T. 
(Self-Monitoring, Analysis, and Reporting Technology) counters.\nIt relies on the [`smartctl`](https://linux.die.net/man/8/smartctl) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n-  `smartctl --json --scan`\n-  `smartctl --json --all {deviceName} --device {deviceType} --nocheck {powerMode}`\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **smartctl** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **smartctl**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/smartctl.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Install smartmontools (v7.0+)\n\nInstall `smartmontools` version 7.0 or later using your distribution\'s package manager. Version 7.0 introduced the `--json` output mode, which is required for this collector to function properly.\n\n\n#### For Netdata running in a Docker container\n\n**Provide access to storage devices**.\n\nNetdata requires the `SYS_RAWIO` capability and access to the storage devices to run the `smartctl` collector inside a Docker container. Here\'s how you can achieve this:\n\n- `docker run`\n\n  ```bash\n  docker run --cap-add SYS_RAWIO --device /dev/sda:/dev/sda ...\n  ```\n\n- `docker-compose.yml`\n\n  ```yaml\n  services:\n    netdata:\n      cap_add:\n        - SYS_PTRACE\n        - SYS_ADMIN\n        - SYS_RAWIO # smartctl\n      devices:\n        - "/dev/sda:/dev/sda"\n  ```\n\n> **Multiple Devices**: These examples only show mapping of one device (/dev/sda). You\'ll need to add additional `--device` options (in docker run) or entries in the `devices` list (in docker-compose.yml) for each storage device you want Netdata\'s smartctl collector to monitor.\n\n> **NVMe Devices**: Do not map NVMe devices using this method. 
Netdata uses a [dedicated collector](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/collector/nvme#readme) to monitor NVMe devices.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Netdata chart update interval (seconds). Collector may use cached data if this is less than **poll_devices_every**. | 10 | no |\n|  | timeout | `smartctl` binary execution timeout (seconds). | 5 | no |\n|  | scan_every | Device discovery interval using `smartctl --scan` (seconds). Set 0 to scan only once at startup. | 900 | no |\n|  | poll_devices_every | Device polling interval (seconds). Data is cached for this interval. | 300 | no |\n| **Target** | device_selector | Pattern to match the \'info name\' of devices as reported by `smartctl --scan --json`. | * | no |\n|  | extra_devices | Manually specify devices not auto-detected by `smartctl --scan`. Each entry must include both a name and a type. | [] | no |\n| **Performance** | concurrent_scans | Number of devices to scan concurrently. Set 0 for sequential scanning (default). Helps performance when monitoring many devices. | 0 | no |\n|  | [no_check_power_mode](#option-performance-no-check-power-mode) | Skip data collection when device is in low-power mode (avoids unnecessary spin-up). | standby | no |\n\n<a id="option-performance-no-check-power-mode"></a>\n##### no_check_power_mode\n\nValid arguments:\n\n| Mode    | Description |\n|---------|-------------|\n| never   | Always check the device. |\n| sleep   | Skip check if device is in SLEEP mode. |\n| standby | Skip check if device is in SLEEP or STANDBY mode (prevents spin-up). |\n| idle    | Skip check if device is in SLEEP, STANDBY, or IDLE mode (not recommended since disks may still be spinning). |\n\n\n\n{% /details %}
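\n\nAs an illustrative sketch (the device names below are placeholders), `device_selector` can restrict collection to particular devices; the pattern is matched against the info name reported by `smartctl --scan --json`:\n\n```yaml\njobs:\n  - name: smartctl\n    device_selector: "/dev/sda /dev/sdb"   # placeholder names; space-separated patterns, default is *\n```\n\n#### via UI\n\nConfigure the **smartctl** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the smartctl data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _smartctl_ (or scroll the list) to locate the **smartctl** collector.\n5. Click the **+** next to the **smartctl** collector to add a new job.\n6. 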
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/smartctl.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/smartctl.conf\n```\n\n##### Examples\n\n###### Custom devices poll interval\n\nAllows you to override the default devices poll interval (data collection).\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: smartctl\n    poll_devices_every: 60  # Collect S.M.A.R.T. statistics every 60 seconds\n\n```\n{% /details %}\n###### Concurrent scanning for multiple devices\n\nThis example demonstrates enabling concurrent scanning to improve performance when monitoring many devices.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: smartctl\n    concurrent_scans: 4  # Scan up to 4 devices concurrently\n\n```\n{% /details %}\n###### Extra devices\n\nThis example demonstrates using `extra_devices` to manually add a storage device (`/dev/sdc`) not automatically detected by `smartctl --scan`.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: smartctl\n    extra_devices:\n      - name: /dev/sdc\n        type: jmb39x-q,3\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `smartctl` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m smartctl\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m smartctl -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `smartctl` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep smartctl\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep smartctl /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep smartctl\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per controller\n\nThese metrics refer to the Storage Device.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| device_name | Device name |\n| device_type | Device type |\n| model_name | Model name |\n| serial_number | Serial number |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| smartctl.device_smart_status | passed, failed | status |\n| smartctl.device_ata_smart_error_log_count | error_log | logs |\n| smartctl.device_power_on_time | power_on_time | seconds |\n| smartctl.device_temperature | temperature | Celsius |\n| smartctl.device_power_cycles_count | power | cycles |\n| smartctl.device_read_errors_rate | corrected, uncorrected | errors/s |\n| smartctl.device_write_errors_rate | corrected, uncorrected | errors/s |\n| smartctl.device_verify_errors_rate | corrected, uncorrected | errors/s |\n| smartctl.device_smart_attr_{attribute_name} | {attribute_name} | {attribute_unit} |\n| smartctl.device_smart_attr_{attribute_name}_normalized | {attribute_name} | value |\n\n",integration_type:"collector",id:"go.d.plugin-smartctl-S.M.A.R.T.",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/smartctl/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-snmp",plugin_name:"go.d.plugin",module_name:"snmp",monitored_instance:{name:"SNMP devices",link:"",icon_filename:"SNMP.png",categories:["data-collection.networking"]},keywords:["snmp","mib","oid","network","router","switch","firewall","ap","access point","wireless controller","wlc","wifi","vpn","pdu","ups","nas","san","printer","bgp","ospf","ucd","3com","a10","alcatel","lucent","nokia","anue","apc","netbotz","arista","aruba","audiocodes","avaya","avocent","avtech","roomalert","barracuda","bluecat","brocade","brother","chatsworth","checkpoint","chrysalis","cisco","cisco asa","cisco asr","cisco catalyst","cisco nexus","cisco ironport","cisco ics","cisco wlc","cisco ucs","meraki","citrix","netscaler","cradlepoint","cyberpower","dell","dell emc","poweredge","sonicwall","dialogic","dlink","d-link","eaton","exagrid","extreme","f5","big-ip","fireeye","fortinet","fortigate","fortiswitch","gigamon","hp","hewlett packard","hp ilo","ilo","ilo4","hp h3c","hp icf","hpe","proliant","huawei","3com huawei","ibm","datapower","lenovo","idrac","dell 
idrac","infinera","coriant","infoblox","isilon","ixsystems","truenas","juniper","junos","kyocera","linksys","mcafee","mikrotik","nasuni","nec","net-snmp","netsnmp","netapp","netgear","readynas","omron","opengear","palo alto","cloudgenix","peplink","raritan","riverbed","ruckus","serveriron","server-iron","servertech","silverpeak","silver peak","edgeconnect","sinetica","sophos","synology","diskstation","tp-link","tplink","tripplite","tripp lite","ubiquiti","unifi","velocloud","vertiv","liebert","watchguard","western digital","wd","mycloud","zebra","zyxel"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# SNMP devices\n\nPlugin: go.d.plugin\nModule: snmp\n\n## Overview\n\nThis collector discovers and monitors any SNMP-enabled network device.\n\n- **Built-in vendor profiles**: Netdata ships with a [large library of profiles](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/config/go.d/snmp.profiles/default) for major vendors, enabling automatic, out-of-the-box monitoring\u2014**no manual OID configuration needed** for common hardware.\n- **Custom profiles supported**: Users can extend or override stock profiles to add new devices, modify charts, or collect additional OIDs.\n- **Automatic vendor/model detection**: Devices are matched to the right profile using selectors such as `sysObjectID` and `sysDescr`.\n- **ICMP ping**: Optional round-trip latency monitoring alongside SNMP, with a `ping_only` mode available.\n- **SNMP v1, v2c, and v3 support**: Fully implemented via the [gosnmp](https://github.com/gosnmp/gosnmp) library.\n\n\n**Built-in profiles for major vendors:**\n\n| Category | Vendors |\n|----------|---------|\n| Switches & Routers | Cisco (Catalyst, Nexus, ASR, ISR), Arista, Juniper, HP/HPE, Dell, Extreme |\n| Firewalls | Palo Alto, Fortinet FortiGate, Cisco ASA, Checkpoint, SonicWall |\n| Wireless | Aruba, Cisco WLC, Ubiquiti, Alcatel-Lucent |\n| Load Balancers | F5 BIG-IP, Citrix NetScaler, A10 Thunder |\n| Infrastructure | APC UPS/PDU, Dell servers, plus standard MIBs (BGP, OSPF, TCP/UDP) |\n\n> This table highlights common vendors\u2014the **full library includes many more**.\n\n\n:::info\n\nSee: [SNMP Profile Format](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/snmp/profile-format.md) to learn how to write your own or extend stock ones.\n\n:::\n\n**Profile locations**\n\n| Type | Default path | Notes |\n|------|--------------|-------|\n| **Stock profiles** | `/usr/lib/netdata/conf.d/go.d/snmp.profiles/default/` | Shipped with Netdata |\n| **User profiles** | `/etc/netdata/go.d/snmp.profiles/` | Place custom or modified profiles here |\n\n> Depending on installation, paths may be prefixed with `/opt/netdata`.\n\nA **profile** defines:\n\n- Device selectors for auto-matching (e.g. `sysObjectID`, `sysDescr`)\n- The exact OIDs to collect (scalars and tables)\n- How to label table rows (metric tags)\n- Chart/metric metadata (units, families, types), including optional **virtual metrics**\n\n**At runtime, the collector**:\n\n1. Reads standard system OIDs (e.g. `sysObjectID`, `sysDescr`) to identify the device\n2. Picks the best matching vendor/model profile(s)\n3. 
Collects exactly the metrics those profiles define\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nSNMP service discovery can automatically scan configured networks and feed the SNMP collector with discovered devices.\n\n- Disabled by default; enable and configure explicitly.\n- Supports single IPs, ranges, and CIDR blocks (up to 512 IPs per subnet).\n- Uses the provided SNMP credentials (v1/v2c/v3) to probe devices.\n- Caches discovery results (configurable) to reduce network load.\n- At collection time, each discovered device is matched to the appropriate [profile](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/snmp/profile-format.md) based on its `sysObjectID`, `sysDescr`, and the profile\u2019s selector rules.\n\nThe configuration file name is [go.d/sd/snmp.conf](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/config/go.d/sd/snmp.conf).\n\nYou can edit the configuration file using the edit-config script from the Netdata [config directory](https://learn.netdata.cloud/docs/netdata-agent/configuration#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/sd/snmp.conf\n```\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\n**Device constraints**: Many SNMP devices (e.g., access switches) have limited CPU/ASIC time for management. If you see timeouts or gaps, increase `update_every` (poll less often), reduce `max_repetitions`, or stagger polling across devices.\n\n**Concurrent polling**: Parallel access by multiple tools may cause missed counters on some devices. Increase the collection interval (`update_every`) to reduce request pressure.\n\n",setup:'## Setup\n\n\nYou can configure the **snmp** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **snmp**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/snmp.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Prepare the SNMP device\n\nBefore configuring the collector:\n- Enable the SNMP service on the target device (via its management interface).\n- Ensure the device is reachable from the Netdata node on UDP/161.\n- Gather connection details: IP/DNS, SNMP version, and either a community (v1/v2c) or v3 credentials (user, auth/priv).\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection frequency (seconds). | 10 | no |\n|  | autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| **Target** | hostname | Target host (IP or DNS name, IPv4/IPv6). |  | yes |\n| **SNMPv1/2** | community | SNMPv1/2 community string. | public | no |\n| **SNMPv3** | user.name | SNMPv3 user name. |  | no |\n|  | [user.level](#option-snmpv3-user-level) | Security level of SNMPv3 messages. |  | no |\n|  | [user.auth_proto](#option-snmpv3-user-auth-proto) | Authentication protocol for SNMPv3 messages. |  | no |\n|  | user.auth_key | Authentication protocol pass phrase for SNMPv3 messages. |  | no |\n|  | [user.priv_proto](#option-snmpv3-user-priv-proto) | Privacy protocol for SNMPv3 messages. |  | no |\n|  | user.priv_key | Privacy protocol pass phrase for SNMPv3 messages. |  | no |\n| **SNMP transport** | options.version | SNMP version. Available versions: 1, 2, 3. | 2 | no |\n|  | options.port | Target port. | 161 | no |\n|  | options.retries | Retries to attempt. | 1 | no |\n|  | options.timeout | SNMP request/response timeout. | 5 | no |\n|  | options.max_repetitions | Controls how many SNMP variables to retrieve in a single GETBULK request. | 25 | no |\n|  | options.max_request_size | Maximum number of OIDs allowed in a single GET request. | 60 | no |\n| **Ping** | ping_only | Collect only ICMP round-trip metrics and skip periodic SNMP polling. A minimal SNMP sysInfo probe still runs at setup for naming/labels/metadata. | no | no |\n|  | ping.enabled | Enable ICMP round-trip measurements (runs alongside SNMP). When disabled, no ping metrics are collected. | yes | no |\n|  | ping.privileged | Use raw ICMP (privileged). If false, unprivileged mode is used. | yes | no |\n|  | ping.packets | Number of ping packets to send per iteration. | 3 | no |\n|  | ping.interval | Interval between sending ping packets. | 100ms | no |\n| **Profiles** | manual_profiles | A list of profiles to force-apply when auto-detection cannot be used. | [] | no |\n| **Virtual node** | create_vnode | If set, the collector will create a Netdata Virtual Node for this SNMP device, which will appear as a separate Node in Netdata. | true | no |\n|  | vnode_device_down_threshold | Number of consecutive failed data collections before marking the device as down. | 3 | no |\n|  | vnode.guid | A unique identifier for the Virtual Node. If not set, a GUID will be automatically generated from the device\'s IP address. |  | no |\n|  | vnode.hostname | The hostname that will be used for the Virtual Node. If not set, the device\'s hostname will be used. |  | no |\n|  | vnode.labels | Additional key-value pairs to associate with the Virtual Node. 
|  | no |\n\n<a id="option-snmpv3-user-level"></a>\n##### user.level\n\nThe security level of an SNMPv3 message as per RFC 3414 (`user.level`):\n\n| String value | Int value | Description                              |\n|:------------:|:---------:|------------------------------------------|\n|     none     |     1     | no message authentication or encryption  |\n|  authNoPriv  |     2     | message authentication and no encryption |\n|   authPriv   |     3     | message authentication and encryption    |\n\n\n<a id="option-snmpv3-user-auth-proto"></a>\n##### user.auth_proto\n\nThe digest algorithm for SNMPv3 messages that require authentication (`user.auth_proto`):\n\n| String value | Int value | Description                               |\n|:------------:|:---------:|-------------------------------------------|\n|     none     |     1     | no message authentication                 |\n|     md5      |     2     | MD5 message authentication (HMAC-MD5-96)  |\n|     sha      |     3     | SHA message authentication (HMAC-SHA-96)  |\n|    sha224    |     4     | SHA message authentication (HMAC-SHA-224) |\n|    sha256    |     5     | SHA message authentication (HMAC-SHA-256) |\n|    sha384    |     6     | SHA message authentication (HMAC-SHA-384) |\n|    sha512    |     7     | SHA message authentication (HMAC-SHA-512) |\n\n\n<a id="option-snmpv3-user-priv-proto"></a>\n##### user.priv_proto\n\nThe encryption algorithm for SNMPv3 messages that require privacy (`user.priv_proto`):\n\n| String value | Int value | Description                                                             |\n|:------------:|:---------:|-------------------------------------------------------------------------|\n|     none     |     1     | no message encryption                                                   |\n|     des      |     2     | DES encryption (CBC-DES)                                                |\n|     aes      |     3     | 128-bit AES encryption (CFB-AES-128)                                    |\n|    aes192    |     4     | 192-bit AES encryption (CFB-AES-192) with "Blumenthal" key localization |\n|    aes256    |     5     | 256-bit AES encryption (CFB-AES-256) with "Blumenthal" key localization |\n|   aes192c    |     6     | 192-bit AES encryption (CFB-AES-192) with "Reeder" key localization     |\n|   aes256c    |     7     | 256-bit AES encryption (CFB-AES-256) with "Reeder" key localization     |\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **snmp** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the snmp data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _snmp_ (or scroll the list) to locate the **snmp** collector.\n5. Click the **+** next to the **snmp** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/snmp.conf`.\n\nThe file format is YAML. 
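For instance, a complete job entry might look like the following sketch (the job name, hostname, and community are placeholders, and the optional ping settings come from the options table above):\n\n```yaml\njobs:\n  - name: edge_router          # placeholder job name\n    hostname: 192.0.2.254      # placeholder device address\n    community: public          # SNMPv1/2 community string\n    update_every: 30           # poll every 30 seconds\n    ping:\n      enabled: yes             # also collect ICMP round-trip metrics\n      packets: 3               # ping packets per iteration\n```\n\n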
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/snmp.conf\n```\n\n##### Examples\n\n###### SNMPv1/2\n\nIn this example:\n\n- the SNMP device is `192.0.2.1`.\n- the SNMP version is `2`.\n- the SNMP community is `public`.\n- we will update the values every 10 seconds.\n\nProfiles are auto-selected at runtime\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: switch\n    update_every: 10\n    hostname: 192.0.2.1\n    community: public\n    options:\n      version: 2\n\n```\n{% /details %}\n###### SNMPv3\n\nTo use SNMPv3:\n\n- use `user` instead of `community`.\n- set `options.version` to 3.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: switch\n    update_every: 10\n    hostname: 192.0.2.1\n    options:\n      version: 3\n    user:\n      name: username\n      level: authPriv\n      auth_proto: sha256\n      auth_key: auth_protocol_passphrase\n      priv_proto: aes256\n      priv_key: priv_protocol_passphrase\n\n```\n{% /details %}\n',alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",functions:"## Functions\n\nThis collector exposes real-time functions for interactive troubleshooting in the Live tab.\n\n\n### Network Interfaces\n\nProvides detailed network interface traffic and status metrics from SNMP-enabled devices.\n\nThis function queries cached SNMP interface data collected during regular polling cycles and presents it in a sortable, filterable table. Each row represents a network interface on the monitored SNMP device, with comprehensive metrics for traffic analysis, error monitoring, and operational status tracking.\n\nUse cases:\n- Identify top bandwidth-consuming interfaces on routers, switches, and access points\n- Monitor interface operational and administrative status for network health\n- Investigate packet errors, discards, and unusual traffic patterns\n\nData is sourced from the IF-MIB (RFC 2863) interface counters and is cached from the last successful SNMP collection. 
No additional SNMP requests are triggered when calling this function.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Snmp:interfaces` |\n| Require Cloud | no |\n| Performance | Uses cached SNMP data only, no additional SNMP requests are triggered:<br/>\u2022 Responses are instantaneous from memory cache<br/>\u2022 Large devices with many interfaces may return many rows |\n| Security | Exposes interface names, operational status, and traffic counters only:<br/>\u2022 No packet payloads or authentication credentials are exposed<br/>\u2022 No device configuration details are exposed |\n| Availability | Available when:<br/>\u2022 The collector has completed at least one data collection cycle<br/>\u2022 Interface data is cached from the last successful SNMP collection<br/>\u2022 Returns HTTP 503 if cache is not ready yet |\n\n#### Prerequisites\n\nNo additional configuration is required.\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Type Group | select | Filter interfaces by their type classification group. Custom mapping categorizes IANA interface types into practical groups for easier filtering. | yes | ethernet | Ethernet (default), Aggregation, Virtual, Other |\n\n#### Returns\n\nNetwork interface metrics from cached SNMP data, including traffic rates, packet statistics, operational status, and error counters. Each row represents one physical or virtual interface.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Interface | string |  |  | Network interface name or identifier (e.g., eth0, GigabitEthernet1/0/1, Vlan100) |\n| Type | string |  |  | IANA-assigned interface type from IF-MIB (e.g., ethernetCsmacd, ieee80211, softwareLoopback) |\n| Type Group | string |  |  | Custom categorization mapping IANA interface types into practical groups: Ethernet (physical Ethernet interfaces), Aggregation (LAG/port-channels, bonds), Virtual (VLANs, loopbacks), or Other (all remaining types) |\n| Admin Status | string |  |  | Administrative state configured on the interface: up (enabled for use), down (administratively disabled), or testing (currently in test mode). Different from operational status. |\n| Oper Status | string |  |  | Current operational state of the interface: up (operational and passing traffic), down (not operational), testing (in test mode), unknown (status cannot be determined), dormant (waiting for external actions), notPresent (interface removed but configuration remains), or lowerLayerDown (interface down due to lower-layer issues) |\n| Traffic In | float | bit/s |  | Inbound network traffic rate in bits per second. High values indicate heavy inbound data flow that may require capacity planning. |\n| Traffic Out | float | bit/s |  | Outbound network traffic rate in bits per second. High values indicate heavy outbound data flow. Compare with Traffic In to identify asymmetric usage patterns. |\n| Unicast In | float | packets/s | hidden | Rate of unicast packets (destined for a single recipient) received per second. Normal traffic pattern for point-to-point communications. |\n| Unicast Out | float | packets/s | hidden | Rate of unicast packets (addressed to a single destination) transmitted per second. |\n| Broadcast In | float | packets/s | hidden | Rate of broadcast packets (sent to all nodes on network) received per second. 
High values may indicate network storms, ARP flooding, or misconfigured devices. |\n| Broadcast Out | float | packets/s | hidden | Rate of broadcast packets transmitted per second. Consistently high broadcast rates can degrade network performance. |\n| Packets In | float | packets/s |  | Total inbound packet rate (sum of unicast, broadcast, and multicast) per second. Useful for overall interface load assessment. |\n| Packets Out | float | packets/s |  | Total outbound packet rate (sum of unicast, broadcast, and multicast) per second. |\n| Errors In | float | packets/s | hidden | Rate of inbound packets with errors that prevented delivery. Non-zero values indicate physical layer issues (cable problems, signal integrity) or buffer overruns. |\n| Errors Out | float | packets/s | hidden | Rate of outbound packets with transmission errors. Non-zero values may indicate interface hardware issues, cabling problems, or duplex mismatches. |\n| Discards In | float | packets/s |  | Rate of inbound packets deliberately discarded by the device (often due to resource constraints, security policies, or unrecognized frames). Unlike errors, the interface may have been functioning correctly but chose to drop the packet. |\n| Discards Out | float | packets/s |  | Rate of outbound packets deliberately discarded. Can indicate output queue overflows, ACL drops, or security policy rejections. |\n| Multicast In | float | packets/s | hidden | Rate of multicast packets (destined for a group) received per second. Common in video streaming, multicast applications, and routing protocols. |\n| Multicast Out | float | packets/s | hidden | Rate of multicast packets transmitted per second. |\n\n",metrics:"## Metrics\n\nMetrics and charts are **defined by the matched SNMP profile(s)** at runtime. They differ by vendor/model/OS and may include, for example, interface counters, optics, CPU/memory, temperature, VLANs, and more. Use the **Metrics** tab on the device\u2019s dashboard to see exactly what is collected for that device.\n\n:::tip\n\nTo understand the structure of these profiles (metrics, tags, virtual metrics, etc.), see **[SNMP Profile Format](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/snmp/profile-format.md)**.\n\n:::\n\nIf `ping.enabled` is true, ICMP latency/packet-loss charts are also provided (or exclusively, when `ping_only: true`).\n\n",troubleshooting:'## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `snmp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn\'t working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that\'s not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m snmp\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m snmp -j jobName\n  ```\n\n### Getting Logs\n\nIf you\'re encountering problems with the `snmp` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep snmp\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector\'s name:\n\n```bash\ngrep snmp /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named "netdata" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep snmp\n```\n\n### Debugging Gaps on Charts\n\nIf your SNMP charts show gaps, it means the collector could not finish metric collection before the next scheduled run. This usually happens when SNMP tables take longer to collect than your configured `update_every`.\n\nThese gaps do *not* mean the device stopped exporting SNMP metrics \u2014 only that the collector had to skip cycles.\n\n**Step 1: Check the Logs**\n\n[Look for messages](#getting-logs) like:\n\n```text\nlevel=warn msg="skipping data collection: previous run is still in progress for 4s (skipped 4 times in a row, interval 1s)" collector=snmp job=your_device\nlevel=info msg="data collection resumed after 4.36s (skipped 4 times)" collector=snmp job=your_device\n```\n\nThe \u201cresumed after\u201d message shows how long the previous collection actually took.  \nFor example, if a run needs ~4.4 seconds and `update_every` is 1 second, 4 cycles will be skipped.\n\n\n**Step 2: Check Collection Timings**\n\nOpen **SNMP \u2192 Internal \u2192 Stats** in the dashboard.  \nThe **SNMP profile collection timings** chart shows how long each part of the SNMP polling takes.  \nTable metrics are usually the slowest and often determine the total collection time.\n\n**Step 3: Increase the data collection interval**\n\n[Set `update_every`](#setup) to a value **higher than your slowest collection time**, with some extra buffer for network variability.\n\n| Typical Collection Time | Recommended `update_every` |\n|-------------------------|-----------------------------|\n| < 2 seconds             | 2 seconds                  |\n| 2\u20135 seconds             | 5 seconds                  |\n| 5\u201310 seconds            | 10 seconds                 |\n| > 10 seconds            | collection_time \xd7 2        |\n\n:::info\n\n- **Rule of thumb:** `update_every` should be at least 2\xd7 your slowest table collection time.  
\n- The default `update_every: 10` works well in most environments.  \n- Only reduce it if your device consistently responds fast enough.\n\n:::\n\n**Quick Checklist**\n1. Do logs show \u201cskipping data collection\u201d?  \n2. Does *Internal \u2192 Stats* show collection time > `update_every`?  \n3. Increase `update_every` until skips disappear.\n\n\n',integration_type:"collector",id:"go.d.plugin-snmp-SNMP_devices",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/snmp/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-spigotmc",plugin_name:"go.d.plugin",module_name:"spigotmc",monitored_instance:{name:"SpigotMC",link:"https://www.spigotmc.org/",categories:["data-collection.applications"],icon_filename:"spigot.jfif"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["minecraft","spigotmc","spigot"]},overview:"# SpigotMC\n\nPlugin: go.d.plugin\nModule: spigotmc\n\n## Overview\n\nThis collector monitors SpigotMC server performance, including the average ticks per second, memory utilization, and active users.\n\n\nIt sends the `tps` and `list` commands to the server and gathers the metrics from the responses.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects SpigotMC instances running on localhost that are listening on port 25575.\n\n> **Note that the SpigotMC RCON API requires a password**. \n> While Netdata can automatically detect SpigotMC instances and create data collection jobs, these jobs will fail unless you provide the necessary credentials.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **spigotmc** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **spigotmc**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/spigotmc.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection frequency (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | address | SpigotMC server RCON address (IP:PORT). | 127.0.0.1:25575 | yes |\n|  | timeout | Connection, read, and write timeout duration (seconds). Includes name resolution. | 1 | no |\n| **Auth** | password | RCON password for authentication. |  | yes |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **spigotmc** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the spigotmc data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _spigotmc_ (or scroll the list) to locate the **spigotmc** collector.\n5. Click the **+** next to the **spigotmc** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/spigotmc.conf`.\n\nThe file format is YAML. 
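For instance, a job that also tunes the collection interval and timeout might look like the following sketch (the address and password are placeholders):\n\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:25575   # placeholder RCON address\n    password: somePassword     # placeholder RCON password\n    update_every: 5            # collect every 5 seconds\n    timeout: 3                 # connection/read/write timeout (seconds)\n```\n\n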
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/spigotmc.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:25575\n    password: somePassword\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:25575\n    password: somePassword\n\n  - name: remote\n    address: 203.0.113.0:25575\n    password: somePassword\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `spigotmc` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m spigotmc\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m spigotmc -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `spigotmc` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep spigotmc\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep spigotmc /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep spigotmc\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per SpigotMC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| spigotmc.players | players | players |\n| spigotmc.avg_tps | 1min, 5min, 15min | ticks |\n| spigotmc.memory | used, alloc | bytes |\n\n",integration_type:"collector",id:"go.d.plugin-spigotmc-SpigotMC",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/spigotmc/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-sql",plugin_name:"go.d.plugin",module_name:"sql",monitored_instance:{name:"SQL databases (generic)",link:"https://en.wikipedia.org/wiki/SQL",categories:["data-collection.databases"],icon_filename:"sql.svg"},related_resources:{integrations:{list:[]}},alternative_monitored_instances:[],info_provided_to_referring_integrations:{description:""},keywords:["db","database","sql","mysql","maria","postgres","postgresql","pgx","oracle","sqlserver","mssql","generic"]},overview:"# SQL databases (generic)\n\nPlugin: go.d.plugin\nModule: sql\n\n## Overview\n\nMetrics and charts for this collector are **entirely defined by your SQL\nconfiguration**. There is no fixed metric reference: each job can expose\ndifferent metrics depending on its `metrics` and `queries` blocks.\n\nTo see what a specific job collects, open that job's dashboard in Netdata\nand inspect the charts and dimensions it created.\n\nJobs can also define **functions** that provide interactive table views in\nNetdata's Live tab. A job can have metrics only, functions only, or both.\n\n:::tip\n\nTo change what is collected, edit the `metrics` (and optional `queries`)\nin the job configuration. After you save the changes, the updated set of\ncharts and metrics is reflected in Netdata after the next data collection.\n\n:::\n\n\nThe collector connects to your database using Go\u2019s **database/sql** package\nand the selected driver:\n\n  - `mysql` \u2014 MySQL / MariaDB\n  - `pgx`   \u2014 PostgreSQL\n  - `oracle` \u2014 Oracle Database\n  - `sqlserver` \u2014 Microsoft SQL Server / Azure SQL\n\nFor each metric block you define, it executes the SQL query (inline or via\n`query_ref`), reads the result set, and maps it to Netdata charts and\ndimensions.\n\nAdditionally, you can define **functions** that expose SQL query results as\ninteractive table views in Netdata's Live tab. Functions support filtering,\nsorting, and searching without creating persistent metrics.\n\n### Result Processing Modes\n\n| Mode       | How it works                                                                 | Best used when                                      |\n|------------|------------------------------------------------------------------------------|-----------------------------------------------------|\n| **columns**| Specific numeric columns from each row become dimensions on your charts.     | The result set has stable, known column names.      |\n| **kv**     | One column provides metric names (keys) and another provides their values.   | The set of metrics is dynamic or key\u2013value shaped.  
|\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis is a **generic collector** and does **not** perform automatic detection.\n\nIt does not create any jobs on its own \u2014 you must configure at least one\njob before it can collect data.\n\n\n#### Limits\n\nThere are no built-in limits on the number of queries or rows processed.\nHowever, each metric block must define at least one chart, and each chart\nmust define at least one dimension.\n\nKeep your queries lightweight and scoped to the data you actually need\nto avoid adding load on the database server.\n\n\n#### Performance Impact\n\nPerformance impact depends entirely on the queries you configure and the\ncollection frequency (update_every).\n\nPrefer indexed reads, avoid full table scans or heavy aggregations, and\nconsider using database views tailored for monitoring.\n\n",setup:'## Setup\n\n\nYou can configure the **sql** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **sql**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/sql.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Create a read-only database user\n\nCreate a dedicated user for Netdata with read-only privileges on the\nviews/tables used in your monitoring queries.\n\nFor example, on a typical RDBMS you would:\n\n- Create a user.\n- Grant SELECT on system metrics views or monitoring views.\n\nAfter creating the user and updating the configuration, restart the\nNetdata Agent with `sudo systemctl restart netdata`, or the appropriate\nmethod for your system.\n\n\n#### Allow Netdata to connect to the database\n\nEnsure the Netdata host can reach the database via the configured DSN,\neither using:\n\n- a local UNIX/TCP socket, or\n- a network connection (hostname/IP and port).\n\nIf the database is remote, make sure any firewalls or security groups\nallow connections from the Netdata node.\n\n\n\n### Configuration\n\n#### Options\n\n**Full Configuration Structure**\n\n```yaml\n# ---------- CONNECTION ----------\ndriver: <mysql|pgx|oracle|sqlserver>      # REQUIRED. SQL driver.\ndsn: "<connection string>"                   # REQUIRED. Driver-specific DSN/URL.\n\n# Optional connection settings\ntimeout: <seconds>                           # OPTIONAL. Query timeout.\n\n# Optional static labels applied to all charts\nstatic_labels:\n  <label_key1>: <label_value>\n  <label_key2>: <label_value>\n\n# ---------- REUSABLE QUERIES ----------\n# Optional. 
Define reusable SQL queries referenced later via query_ref.\nqueries:\n  - id: <query_id>\n    query: |\n      SELECT ...\n\n# ---------- METRICS ----------\n# Each metric block runs one query and generates one or more charts.\nmetrics:\n  - id: <metric_block_id>                    # REQUIRED. Unique within this job.\n\n    # Choose ONE of these:\n    query_ref: <query_id>                    # Use a reusable query\n    # OR\n    # query: |                               # Inline SQL\n    #   SELECT ...\n\n    mode: <columns|kv>                       # REQUIRED. How to interpret result rows.\n\n    # KV mode settings (only when mode: kv)\n    kv_mode:\n      name_col: <column_name>                # Column containing keys\n      value_col: <column_name>               # Column containing numeric values\n\n    # Optional: derive labels from row columns (creates per-label charts)\n    labels_from_row:\n      - source: <column_name>                # Column name from result set\n        name: <label_key>                    # Label key exposed to Netdata\n      - source: <column_name>\n        name: <label_key>\n\n    # Charts produced by this metric block\n    charts:\n      - title: "<Chart Title>"               # REQUIRED. Shown in dashboards.\n        context: "<context.name>"            # REQUIRED. Netdata context.\n        family: "<family>"                   # REQUIRED. Netdata chart family.\n        units: "<units>"                     # REQUIRED. Unit string for the chart.\n        type: <line|stacked|area>            # OPTIONAL. Default: line.\n        algorithm: <absolute|incremental>    # OPTIONAL. Default: absolute.\n\n        dims:\n          # ---- COLUMNS MODE DIM ----\n          # In mode: columns, `source` MUST be a numeric COLUMN name from the result set.\n          - name: <dim_id>                   # REQUIRED. Dimension id (unique within this chart).\n            source: <column_name>            # REQUIRED. Numeric column to chart.\n\n          # ---- KV MODE DIM ----\n          # In mode: kv, `source` MUST be a KEY name (NOT a column).\n          # The collector finds the row where (row[kv_mode.name_col] == `source`)\n          # and uses row[kv_mode.value_col].\n          - name: <dim_id>\n            source: <key_name>               # REQUIRED. Key name resolved via kv_mode.name_col.\n\n          # ---- STATUS DIM (one-hot 1/0) ----\n          # Works in BOTH modes. Evaluates `status_when` against the resolved value:\n          #   * columns mode: the value in the specified column for the row\n          #   * kv mode:      the value for the resolved key (row[kv_mode.value_col])\n          - name: <dim_id>\n            source: <column_name_or_key_name>  # Same interpretation as above, per mode.\n            status_when: # Exactly ONE of the following:\n              equals: <string|number|bool>    # Active (1) if value == this literal.\n              # in: [ <v1>, <v2>, ... ]       # Active if value is in the list.\n              # match: \'^regex$\'              # Active if value matches this regex.\n\n# ---------- FUNCTIONS ----------\n# Set function_only: true if this job only provides functions (no metrics).\nfunction_only: <true|false>                    # OPTIONAL. Default: false.\n\n# Expose SQL queries as interactive table views in Netdata\'s Live tab.\nfunctions:\n  - id: <function_id>                          # REQUIRED. Unique identifier.\n    name: <display_name>                       # OPTIONAL. 
Derived from id if not set.\n    description: <help_text>                   # OPTIONAL. Shown in the UI.\n    query: |                                   # REQUIRED. SQL to execute.\n      SELECT ...\n    timeout: <seconds>                         # OPTIONAL. Query timeout.\n    limit: <max_rows>                          # OPTIONAL. Default: 100.\n    default_sort: <column_name>                # OPTIONAL. Initial sort column.\n    default_sort_desc: <true|false>            # OPTIONAL. Default: true.\n    columns:                                   # OPTIONAL. Override column metadata.\n      <column_name>:\n        type: <string|integer|float|boolean|duration|timestamp>\n        units: <unit_string>\n        tooltip: <hover_text>\n        visible: <true|false>\n        sortable: <true|false>\n```\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Not used for this collector. Set 0 to disable. | 0 | no |\n| **Target** | driver | SQL driver to use. Supported values: `mysql`, `pgx`, `oracle`, `sqlserver`. | mysql | yes |\n|  | dsn | Database connection string (DSN). The format depends on the selected driver ( [MySQL](https://github.com/go-sql-driver/mysql#dsn-data-source-name), [PostgreSQL](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING-URIS), [MS SQL Server](https://github.com/denisenkom/go-mssqldb#connection-parameters-and-dsn)). |  | yes |\n| **Connection** | timeout | Query and connection check timeout (seconds). | 5 | no |\n| **Labels** | static_labels | A map of static labels added to every chart created by this job. Useful for tagging charts with environment, region, or role. | {} | no |\n| **Queries & Metrics** | queries | A list of reusable queries. Metric blocks can reference these via `query_ref` to avoid repeating SQL. See [Configuration Structure](#configuration) for details. | [] | no |\n|  | metrics | A list of metric blocks. Each block defines how a query is executed and how its result is transformed into one or more charts. See [Configuration Structure](#configuration) for details. | [] | no |\n| **Functions** | functions | A list of SQL functions exposed as interactive table views in Netdata\'s Live tab. Each function runs a SQL query and displays results in a filterable, sortable table. See [Functions](#functions) for details. | [] | no |\n|  | functions[].id | Unique identifier for this function. |  | yes |\n|  | functions[].name | Display name shown in the UI. Auto-derived from ID if not set. |  | no |\n|  | functions[].description | Help text shown in the UI. |  | no |\n|  | functions[].query | SQL query to execute when this function is called. |  | yes |\n|  | functions[].timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions[].limit | Maximum rows to return. | 100 | no |\n|  | functions[].default_sort | Column name for initial sort order. |  | no |\n|  | functions[].default_sort_desc | Sort in descending order by default. | yes | no |\n|  | functions[].columns | Override auto-detected column metadata. Map of column name to settings (type, units, tooltip, visible, sortable). | {} | no |\n|  | function_only | Set to true if this job only provides functions (no metrics). 
When enabled, metrics configuration is not required and no charts are created. | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a Virtual Node. |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **sql** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the sql data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _sql_ (or scroll the list) to locate the **sql** collector.\n5. Click the **+** next to the **sql** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/sql.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/sql.conf\n```\n\n##### Examples\n\n###### Columns mode \u2013 per-database conflicts (with labels)\n\nPostgreSQL example that collects database-level conflict counters from\n`pg_stat_database_conflicts` and creates a separate chart instance per\ndatabase using `labels_from_row`.\n\nThe query:\n\n```sql\nSELECT\n  datname,\n  confl_tablespace,\n  confl_lock,\n  confl_snapshot,\n  confl_bufferpin,\n  confl_deadlock\nFROM pg_stat_database_conflicts;\n```\n\nExample output:\n\n| datname    | confl_tablespace | confl_lock | confl_snapshot | confl_bufferpin | confl_deadlock |\n|------------|------------------|------------|----------------|-----------------|----------------|\n| postgres   | 0                | 0          | 0              | 0               | 0              |\n| production | 0                | 0          | 0              | 0               | 0              |\n\nThis configuration turns each row into a **chart instance** (one for\n`db=postgres`, one for `db=production`) with five dimensions\n(`confl_tablespace`, `confl_lock`, `confl_snapshot`, `confl_bufferpin`,\n`confl_deadlock`).\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: pg_conflicts_per_db\n    driver: pgx\n    dsn: \'postgresql://netdata:password@127.0.0.1:5432/postgres\'\n    timeout: 5\n\n    metrics:\n      - id: conflicts\n        mode: columns\n        query: |\n          SELECT\n            datname,\n            confl_tablespace,\n            confl_lock,\n            confl_snapshot,\n            confl_bufferpin,\n            confl_deadlock\n          FROM pg_stat_database_conflicts;\n        labels_from_row:\n          - source: datname\n            name: db\n        charts:\n          - title: "PostgreSQL conflicts"\n            context: sql.pg_conflicts\n            family: 
conflicts\n            units: conflicts\n            type: line\n            algorithm: absolute\n            dims:\n              - name: confl_tablespace\n                source: confl_tablespace\n              - name: confl_lock\n                source: confl_lock\n              - name: confl_snapshot\n                source: confl_snapshot\n              - name: confl_bufferpin\n                source: confl_bufferpin\n              - name: confl_deadlock\n                source: confl_deadlock\n\n```\n{% /details %}\n###### Columns mode \u2013 single numeric value (uptime)\n\nPostgreSQL example that exposes a single numeric metric (server uptime in\nseconds) as a one-dimension chart using columns mode.\n\nThe query:\n\n```sql\nSELECT\n  EXTRACT(\n    EPOCH FROM (now() - pg_postmaster_start_time())\n  ) AS uptime_seconds;\n```\n\nExample output:\n\n| uptime_seconds |\n|----------------|\n| 50.867359      |\n\nThis configuration maps the `uptime_seconds` column to a single\n`uptime` dimension on the `sql.pg_uptime` chart.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: pg_uptime\n    driver: pgx\n    dsn: \'postgresql://netdata:password@127.0.0.1:5432/postgres\'\n    timeout: 5\n\n    metrics:\n      - id: uptime\n        mode: columns\n        query: |\n          SELECT\n            EXTRACT(\n              EPOCH FROM (now() - pg_postmaster_start_time())\n            ) AS uptime_seconds;\n        charts:\n          - title: "PostgreSQL uptime"\n            context: sql.pg_uptime\n            family: uptime\n            units: seconds\n            type: line\n            algorithm: absolute\n            dims:\n              - name: uptime\n                source: uptime_seconds\n\n```\n{% /details %}\n###### KV mode \u2013 connection states as key/value pairs\n\nPostgreSQL example that aggregates connection states from\n`pg_stat_activity` and uses kv mode to map each state to a dimension.\n\nThe query:\n\n```sql\nSELECT\n  state,\n  count(*) AS cnt\nFROM pg_stat_activity\nGROUP BY state;\n```\n\nExample output:\n\n| state                        | cnt |\n|------------------------------|-----|\n| active                       |   1 |\n| idle                         |  14 |\n| idle in transaction          |   7 |\n| idle in transaction (aborted)|   1 |\n| fastpath function call       |   1 |\n| disabled                     |   1 |\n\nWith `mode: kv`, `state` becomes the **key** and `cnt` the **value**.\nEach distinct `state` value is mapped to a chart dimension via `dims[*].source`.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: pg_activity_states\n    driver: pgx\n    dsn: \'postgresql://netdata:password@127.0.0.1:5432/postgres\'\n    timeout: 5\n\n    metrics:\n      - id: activity_states\n        mode: kv\n        query: |\n          SELECT\n            state,\n            count(*) AS cnt\n          FROM pg_stat_activity\n          GROUP BY state;\n        kv_mode:\n          name_col: state\n          value_col: cnt\n        charts:\n          - title: "PostgreSQL connection states"\n            context: sql.pg_activity_states\n            family: connections\n            units: connections\n            type: stacked\n            algorithm: absolute\n            dims:\n              - name: active\n                source: active\n              - name: idle\n                source: idle\n              - name: idle_in_transaction\n                source: "idle in transaction"\n              - name: idle_in_transaction_aborted\n             
   source: "idle in transaction (aborted)"\n              - name: fastpath_function_call\n                source: "fastpath function call"\n              - name: disabled\n                source: disabled\n\n```\n{% /details %}\n###### Columns mode \u2013 map state values to a status metric\n\nSimple PostgreSQL example that turns a boolean-like state into a 0/1\nstatus metric using `status_when`.\n\nThe query:\n\n```sql\nSELECT pg_is_in_recovery();\n```\n\nExample output:\n\n| pg_is_in_recovery |\n|-------------------|\n| f                 |\n\nThis configuration creates a single chart with two status dimensions:\n  - `in_recovery` becomes **1 when the value is `"t"`** and **0 otherwise**.\n  - `not_in_recovery` becomes **1 when the value is `"f"`** and **0 otherwise**.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: pg_recovery_status\n    driver: pgx\n    dsn: \'postgresql://netdata:password@127.0.0.1:5432/postgres\'\n    timeout: 5\n\n    metrics:\n      - id: recovery_status\n        mode: columns\n        query: |\n          SELECT pg_is_in_recovery();\n        charts:\n          - title: "PostgreSQL recovery status"\n            context: sql.pg_recovery_status\n            family: state\n            units: status\n            type: line\n            algorithm: absolute\n            dims:\n              - name: in_recovery\n                source: pg_is_in_recovery\n                status_when:\n                  equals: "t"\n              - name: not_in_recovery\n                source: pg_is_in_recovery\n                status_when:\n                  equals: "f"\n\n```\n{% /details %}\n###### Function-only mode \u2013 slow query analysis\n\nPostgreSQL example that provides an interactive slow query analysis view\nwithout collecting any time-series metrics.\n\nThis is useful for ad-hoc troubleshooting via the Netdata **Live** tab.\nThe function queries `pg_stat_statements` to show the slowest queries\nsorted by total execution time.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: pg_slow_queries\n    driver: pgx\n    dsn: \'postgresql://netdata:password@127.0.0.1:5432/postgres\'\n    timeout: 10\n    function_only: true\n\n    functions:\n      - id: slow-queries\n        name: Slow Queries\n        description: Top queries by total execution time from pg_stat_statements\n        query: |\n          SELECT\n            queryid,\n            LEFT(query, 100) AS query,\n            calls,\n            total_exec_time,\n            mean_exec_time,\n            rows\n          FROM pg_stat_statements\n          ORDER BY total_exec_time DESC\n        limit: 100\n        default_sort: total_exec_time\n        default_sort_desc: true\n        columns:\n          total_exec_time:\n            type: duration\n            units: milliseconds\n            tooltip: Total time spent executing this query\n          mean_exec_time:\n            type: duration\n            units: milliseconds\n            tooltip: Average execution time per call\n\n```\n{% /details %}\n###### Combined metrics and functions\n\nPostgreSQL example that collects time-series metrics AND provides\ninteractive function views in the same job.\n\n- The `metrics` block creates charts for connection states.\n- The `functions` block provides an interactive activity view.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: pg_combined\n    driver: pgx\n    dsn: \'postgresql://netdata:password@127.0.0.1:5432/postgres\'\n    timeout: 5\n\n    # Time-series 
metrics\n    metrics:\n      - id: connections\n        mode: kv\n        query: |\n          SELECT state, count(*) AS cnt\n          FROM pg_stat_activity\n          GROUP BY state\n        kv_mode:\n          name_col: state\n          value_col: cnt\n        charts:\n          - title: "Connection states"\n            context: sql.pg_connections\n            family: connections\n            units: connections\n            type: stacked\n            dims:\n              - name: active\n                source: active\n              - name: idle\n                source: idle\n\n    # Interactive functions\n    functions:\n      - id: active-sessions\n        name: Active Sessions\n        description: Currently running queries\n        query: |\n          SELECT\n            pid,\n            usename,\n            datname,\n            state,\n            query_start,\n            LEFT(query, 200) AS query\n          FROM pg_stat_activity\n          WHERE state = \'active\'\n        limit: 50\n        columns:\n          query_start:\n            type: timestamp\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `sql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m sql\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m sql -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `sql` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep sql\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep sql /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep sql\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",functions:"## Functions\n\nThis collector supports user-defined SQL functions that expose query results as\ninteractive table views in Netdata's **Live** tab. Functions are configured per job\nin the `functions` section of the job configuration. 
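As a minimal sketch, a job-level `functions` block looks like this (the job name, function id, and query are hypothetical; the field names follow the Setup examples above):\n\n```yaml\njobs:\n  - name: my_db                # hypothetical job name\n    driver: pgx\n    dsn: 'postgresql://netdata:password@127.0.0.1:5432/postgres'\n    functions:\n      - id: example-view       # appears under Databases > SQL > my_db in the Live tab\n        name: Example View\n        description: Ad-hoc table view returned by a custom query\n        query: |\n          SELECT 1 AS value\n        limit: 10\n```\n\n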
Since functions are entirely\nuser-defined, no predefined functions are listed here.\n\nIn the Live tab, functions appear in a hierarchical menu:\n\n```\nDatabases\n\u2514\u2500\u2500 SQL\n    \u2514\u2500\u2500 <job_name>\n        \u251c\u2500\u2500 <function_name_1>\n        \u2514\u2500\u2500 <function_name_2>\n```\n\nEach job creates its own group containing all functions defined for that job.\n\n\n",metrics:"## Metrics\n\nMetrics and charts are **defined by your SQL queries and metric blocks** at runtime. They differ by database engine, schema, and configuration, and may include, for example, connection counts, cache hit ratios, row throughput, lock statistics, or custom business KPIs. Use the **Metrics** tab on the job\u2019s dashboard to see exactly what is collected for that job.\n\n:::tip\n\n To change what is collected, edit the `metrics` (and optionally `queries`) sections in `go.d/sql.conf` for the corresponding job. Each change is reflected in Netdata charts after the next data collection.\n\n:::\n\n",integration_type:"collector",id:"go.d.plugin-sql-SQL_databases_(generic)",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/sql/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-squid",plugin_name:"go.d.plugin",module_name:"squid",monitored_instance:{name:"Squid",link:"https://www.squid-cache.org/",categories:["data-collection.web-servers-and-proxies"],icon_filename:"squid.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["squid","web delivery","squid caching proxy"]},overview:"# Squid\n\nPlugin: go.d.plugin\nModule: squid\n\n## Overview\n\nThis collector monitors statistics about the Squid Clients and Servers, like bandwidth and requests.\n\n\nIt collects metrics from the `squid-internal-mgr/counters` endpoint.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Squid instances running on localhost that are listening on port 3128.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:3128\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **squid** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **squid**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/squid.conf` and add a job.                                  
                                       |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:3128 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **squid** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the squid data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _squid_ (or scroll the list) to locate the **squid** collector.\n5. Click the **+** next to the **squid** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/squid.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/squid.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:3128\n\n```\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:3128\n\n  - name: remote\n    url: http://192.0.2.1:3128\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `squid` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m squid\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m squid -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `squid` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep squid\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep squid /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep squid\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Squid instance\n\nThese metrics refer to each monitored Squid instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| squid.clients_net | in, out, hits | kilobits/s |\n| squid.clients_requests | requests, hits, errors | requests/s |\n| squid.servers_net | in, out | kilobits/s |\n| squid.servers_requests | requests, errors | requests/s |\n\n",integration_type:"collector",id:"go.d.plugin-squid-Squid",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/squid/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-squidlog",plugin_name:"go.d.plugin",module_name:"squidlog",monitored_instance:{name:"Squid log files",link:"http://www.squid-cache.org/",icon_filename:"squid.png",categories:["data-collection.web-servers-and-proxies"]},keywords:["squid","logs"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Squid log files\n\nPlugin: go.d.plugin\nModule: squidlog\n\n## Overview\n\nThis collector monitors Squid servers by parsing their access log files.\n\n\nIt automatically detects log files of Squid servers running on localhost.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **squidlog** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **squidlog**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/squidlog.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nSquidlog works with Squid [log format codes](https://www.squid-cache.org/Doc/config/logformat/).\n\nIt knows how to parse and interpret the following codes:\n\n| field          | squid format code | description                                                   |\n|----------------|-------------------|---------------------------------------------------------------|\n| resp_time      | %tr               | Response time (milliseconds).                                 
|\n| client_address | %>a               | Client source IP address.                                     |\n| client_address | %>A               | Client FQDN.                                                  |\n| cache_code     | %Ss               | Squid request status (TCP_MISS etc).                          |\n| http_code      | %>Hs              | The HTTP response status code from Content Gateway to client. |\n| resp_size      | %<st              | Total size of reply sent to client (after adaptation).        |\n| req_method     | %rm               | Request method (GET/POST etc).                                |\n| hier_code      | %Sh               | Squid hierarchy status (DEFAULT_PARENT etc).                  |\n| server_address | %<a               | Server IP address of the last server or peer connection.      |\n| server_address | %<A               | Server FQDN or peer name.                                     |\n| mime_type      | %mt               | MIME content type.                                            |\n\nIn addition, to make `Squid` [native log format](https://wiki.squid-cache.org/Features/LogFormat#Squid_native_access.log_format_in_detail) csv parsable, squidlog understands these groups of codes:\n\n| field       | squid format code | description                        |\n|-------------|-------------------|------------------------------------|\n| result_code | %Ss/%>Hs          | Cache code and http code.          |\n| hierarchy   | %Sh/%<a           | Hierarchy code and server address. |\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection frequency (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | path | Path to the Squid access log file. | /var/log/squid/access.log | yes |\n|  | exclude_path | File path patterns to exclude. | *.gz | no |\n| **Parser** | parser | Log parser configuration block. |  | no |\n|  | parser.log_type | Log parser type (`csv`, `ltsv`, `regexp`, or `auto`). | auto | no |\n|  | parser.csv_config | CSV log parser configuration block. |  | no |\n|  | parser.csv_config.delimiter | CSV field delimiter. | space | no |\n|  | parser.csv_config.format | CSV log format string. | - $resp_time $client_address $result_code $resp_size $req_method - - $hierarchy $mime_type | yes |\n|  | parser.ltsv_config | LTSV log parser configuration block. |  | no |\n|  | parser.ltsv_config.field_delimiter | LTSV field delimiter. | \\t | no |\n|  | parser.ltsv_config.value_delimiter | LTSV value delimiter. | : | no |\n|  | parser.ltsv_config.mapping | LTSV fields mapping to known fields. |  | yes |\n|  | parser.regexp_config | RegExp log parser configuration block. |  | no |\n|  | parser.regexp_config.pattern | RegExp pattern with named groups mapped to known fields. |  | yes |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **squidlog** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the squidlog data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _squidlog_ (or scroll the list) to locate the **squidlog** collector.\n5. Click the **+** next to the **squidlog** collector to add a new job.\n6. 
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/squidlog.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/squidlog.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `squidlog` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m squidlog\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m squidlog -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `squidlog` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep squidlog\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep squidlog /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep squidlog\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Squid log files instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| squidlog.requests | requests | requests/s |\n| squidlog.excluded_requests | unmatched | requests/s |\n| squidlog.type_requests | success, bad, redirect, error | requests/s |\n| squidlog.http_status_code_class_responses | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| squidlog.http_status_code_responses | a dimension per HTTP response code | responses/s |\n| squidlog.bandwidth | sent | kilobits/s |\n| squidlog.response_time | min, max, avg | milliseconds |\n| squidlog.uniq_clients | clients | clients |\n| squidlog.cache_result_code_requests | a dimension per cache result code | requests/s |\n| squidlog.cache_result_code_transport_tag_requests | a dimension per cache result delivery transport tag | requests/s |\n| squidlog.cache_result_code_handling_tag_requests | a dimension per cache result handling tag | requests/s |\n| squidlog.cache_code_object_tag_requests | a dimension per cache result produced object tag | requests/s |\n| squidlog.cache_code_load_source_tag_requests | a dimension per cache result load source tag | requests/s |\n| squidlog.cache_code_error_tag_requests | a dimension per cache result error tag | requests/s |\n| squidlog.http_method_requests | a dimension per HTTP method | requests/s |\n| squidlog.mime_type_requests | a dimension per MIME type | requests/s |\n| squidlog.hier_code_requests | a dimension per hierarchy code | requests/s |\n| squidlog.server_address_forwarded_requests | a dimension per server address | requests/s |\n\n",integration_type:"collector",id:"go.d.plugin-squidlog-Squid_log_files",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/squidlog/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-storcli",plugin_name:"go.d.plugin",module_name:"storcli",monitored_instance:{name:"StoreCLI RAID",link:"https://docs.broadcom.com/doc/12352476",icon_filename:"hard-drive.svg",categories:["data-collection.storage"]},keywords:["storage","raid-controller","manage-disks"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# StoreCLI RAID\n\nPlugin: go.d.plugin\nModule: storcli\n\n## Overview\n\nMonitors the health of StoreCLI Hardware RAID by tracking the status of RAID adapters, physical drives, and backup batteries in your storage system.\nIt relies on the [`storcli`](https://docs.broadcom.com/doc/12352476) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\nExecuted commands:\n-  `storcli /cALL show all J nolog`\n-  `storcli /cALL/eALL/sALL show all J nolog`\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not 
expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **storcli** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **storcli**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/storcli.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 10 | no |\n| timeout | storcli binary execution timeout. | 2 | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **storcli** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the storcli data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _storcli_ (or scroll the list) to locate the **storcli** collector.\n5. Click the **+** next to the **storcli** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/storcli.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/storcli.conf\n```\n\n##### Examples\n\n###### Custom update_every\n\nAllows you to override the default data collection interval.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: storcli\n    update_every: 5  # Collect StorCLI RAID statistics every 5 seconds\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `storcli` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m storcli\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m storcli -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `storcli` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep storcli\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep storcli /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep storcli\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ storcli_controller_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.controller_health_status | RAID controller ${label:controller_number} is unhealthy |\n| [ storcli_controller_bbu_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.controller_bbu_status | RAID controller ${label:controller_number} BBU is unhealthy |\n| [ storcli_phys_drive_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.phys_drive_errors | RAID physical drive c${label:controller_number}/e${label:enclosure_number}/s${label:slot_number} errors |\n| [ storcli_phys_drive_predictive_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.phys_drive_predictive_failures | RAID physical drive c${label:controller_number}/e${label:enclosure_number}/s${label:slot_number} predictive failures |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per controller\n\nThese metrics refer to the Controller.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| controller_number | Controller number (index) |\n| model | Controller model |\n| driver_name | Controller driver (megaraid_sas or mpt3sas) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| storcli.controller_health_status | healthy, unhealthy | status |\n| storcli.controller_status | optimal, degraded, partially_degraded, failed | status |\n| storcli.controller_bbu_status | healthy, unhealthy, na | status |\n| storcli.controller_roc_temperature | temperature | Celsius |\n\n### Per physical drive\n\nThese metrics refer to the Physical Drive.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| controller_number | Controller number (index) |\n| enclosure_number | Enclosure number (index) |\n| slot_number | Slot number (index) |\n| media_type | Media type (e.g. 
HDD) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| storcli.phys_drive_errors | media, other | errors/s |\n| storcli.phys_drive_predictive_failures | predictive_failures | failures/s |\n| storcli.phys_drive_smart_alert_status | active, inactive | status |\n| storcli.phys_drive_temperature | temperature | Celsius |\n\n### Per bbu\n\nThese metrics refer to the Backup Battery Unit.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| controller_number | Controller number (index) |\n| bbu_number | BBU number (index) |\n| model | BBU model |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| storcli.bbu_temperature | temperature | Celsius |\n\n",integration_type:"collector",id:"go.d.plugin-storcli-StoreCLI_RAID",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/storcli/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-supervisord",plugin_name:"go.d.plugin",module_name:"supervisord",monitored_instance:{name:"Supervisor",link:"http://supervisord.org/",icon_filename:"supervisord.png",categories:["data-collection.operating-systems"]},keywords:["supervisor"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Supervisor\n\nPlugin: go.d.plugin\nModule: supervisord\n\n## Overview\n\nThis collector monitors Supervisor instances.\n\nIt can collect metrics from:\n\n- [unix socket](http://supervisord.org/configuration.html?highlight=unix_http_server#unix-http-server-section-values)\n- [internal http server](http://supervisord.org/configuration.html?highlight=unix_http_server#inet-http-server-section-settings)\n\nUsed methods:\n\n- [`supervisor.getAllProcessInfo`](http://supervisord.org/api.html#supervisor.rpcinterface.SupervisorNamespaceRPCInterface.getAllProcessInfo)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\nYou can configure the **supervisord** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **supervisord**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/supervisord.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Supervisord XML-RPC endpoint URL. | http://127.0.0.1:9001/RPC2 | yes |\n|  | timeout | Request timeout (seconds). | 1 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **supervisord** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the supervisord data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _supervisord_ (or scroll the list) to locate the **supervisord** collector.\n5. Click the **+** next to the **supervisord** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/supervisord.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/supervisord.conf\n```\n\n##### Examples\n\n###### HTTP\n\nCollect metrics via HTTP.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: 'http://127.0.0.1:9001/RPC2'\n\n```\n{% /details %}\n###### Socket\n\nCollect metrics via Unix socket.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: 'unix:///run/supervisor.sock'\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: 'http://127.0.0.1:9001/RPC2'\n\n  - name: remote\n    url: 'http://192.0.2.1:9001/RPC2'\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `supervisord` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m supervisord\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m supervisord -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `supervisord` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep supervisord\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep supervisord /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep supervisord\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Supervisor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| supervisord.summary_processes | running, non-running | processes |\n\n### Per process group\n\nThese metrics refer to the process group.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| supervisord.processes | running, non-running | processes |\n| supervisord.process_state_code | a dimension per process | code |\n| supervisord.process_exit_status | a dimension per process | exit status |\n| supervisord.process_uptime | a dimension per process | seconds |\n| supervisord.process_downtime | a dimension per process | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-supervisord-Supervisor",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/supervisord/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-systemdunits",plugin_name:"go.d.plugin",module_name:"systemdunits",monitored_instance:{name:"Systemd Units",link:"https://www.freedesktop.org/wiki/Software/systemd/",icon_filename:"systemd.svg",categories:["data-collection.operating-systems"]},keywords:["systemd"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Systemd Units\n\nPlugin: go.d.plugin\nModule: systemdunits\n\n## Overview\n\nThis collector monitors the state of Systemd units and unit files.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **systemdunits** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **systemdunits**, then click **+** to add a job. 
|\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/systemdunits.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection frequency. | 1 | no |\n|  | autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n|  | timeout | System bus requests timeout. | 1 | no |\n| **Units** | [include](#option-units-include) | Systemd units selector. | *.service | no |\n|  | skip_transient | If set, skip data collection for systemd transient units. | false | no |\n| **Unit Files** | collect_unit_files | If set to true, collect the state of installed unit files. Enabling this may increase system overhead. | false | no |\n|  | collect_unit_files_every | Interval for querying systemd about unit files and their enablement state, measured in seconds. Data is cached for this interval to reduce system overhead. | 300 | no |\n|  | [include_unit_files](#option-unit-files-include-unit-files) | Systemd unit files selector. | *.service | no |\n\n<a id="option-units-include"></a>\n##### include\n\nSystemd units matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ninclude:\n  - pattern1\n  - pattern2\n```\n\n\n<a id="option-unit-files-include-unit-files"></a>\n##### include_unit_files\n\nSystemd unit files matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ninclude_unit_files:\n  - pattern1\n  - pattern2\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **systemdunits** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the systemdunits data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _systemdunits_ (or scroll the list) to locate the **systemdunits** collector.\n5. Click the **+** next to the **systemdunits** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/systemdunits.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/systemdunits.conf\n```\n\n##### Examples\n\n###### Service units\n\nCollect state of all service type units.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: service\n    include:\n      - \'*.service\'\n\n```\n{% /details %}\n###### One specific unit\n\nCollect state of one specific unit.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: my-specific-service\n    include:\n      - \'my-specific.service\'\n\n```\n{% /details %}\n###### All unit types\n\nCollect state of all units.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: my-specific-service-unit\n    include:\n      - \'*\'\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect state of all service and socket type units.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: service\n    include:\n      - \'*.service\'\n\n  - name: socket\n    include:\n      - \'*.socket\'\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `systemdunits` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m systemdunits\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m systemdunits -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `systemdunits` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep systemdunits\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep systemdunits /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep systemdunits\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ systemd_service_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.service_unit_state | systemd service unit in the failed state |\n| [ systemd_socket_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.socket_unit_state | systemd socket unit in the failed state |\n| [ systemd_target_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.target_unit_state | systemd target unit in the failed state |\n| [ systemd_path_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.path_unit_state | systemd path unit in the failed state |\n| [ systemd_device_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.device_unit_state | systemd device unit in the failed state |\n| [ systemd_mount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.mount_unit_state | systemd mount unit in the failed state |\n| [ systemd_automount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.automount_unit_state | systemd automount unit in the failed state |\n| [ systemd_swap_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.swap_unit_state | systemd swap unit in the failed state |\n| [ systemd_scope_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.scope_unit_state | systemd scope unit in the failed state |\n| [ systemd_slice_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.slice_unit_state | systemd slice unit in the failed state |\n| [ systemd_timer_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.timer_unit_state | systemd timer unit in the failed state |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per unit\n\nThese metrics refer to the systemd unit.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| unit_name | systemd unit name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| systemd.service_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.socket_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.target_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.path_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.device_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.mount_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.automount_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.swap_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.timer_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.scope_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.slice_unit_state | active, inactive, activating, deactivating, failed | state |\n\n### Per unit file\n\nThese metrics refer to the systemd unit file.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| unit_file_name | systemd unit file name |\n| unit_file_type | systemd unit file type |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| systemd.unit_file_state | enabled, enabled-runtime, linked, linked-runtime, alias, masked, masked-runtime, static, disabled, indirect, generated, transient, bad | state |\n\n",integration_type:"collector",id:"go.d.plugin-systemdunits-Systemd_Units",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/systemdunits/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-tengine",plugin_name:"go.d.plugin",module_name:"tengine",monitored_instance:{name:"Tengine",link:"https://tengine.taobao.org/",icon_filename:"tengine.jpeg",categories:["data-collection.web-servers-and-proxies"]},keywords:["tengine","web","webserver"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Tengine\n\nPlugin: go.d.plugin\nModule: tengine\n\n## Overview\n\nThis collector monitors Tengine servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **tengine** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 
|\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **tengine**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/tengine.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable the ngx_http_reqstat_module module\n\nTo enable the module, see the official [ngx_http_reqstat_module](https://tengine.taobao.org/document/http_reqstat.html) documentation.\nThe default line format is the only supported format.\n
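\nAfter enabling the module, you can sanity-check that the status endpoint responds before configuring the collector (a minimal check against the default URL used below; adjust the path if you exposed req_status elsewhere):\n\n```bash\ncurl -s http://127.0.0.1/us\n```\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1/us | yes |\n|  | timeout | HTTP request timeout (seconds). | 2 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **tengine** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the tengine data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _tengine_ (or scroll the list) to locate the **tengine** collector.\n5. Click the **+** next to the **tengine** collector to add a new job.\n6. 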
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/tengine.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tengine.conf\n```\n\n##### Examples\n\n###### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/us\n\n```\n{% /details %}\n###### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/us\n    username: foo\n    password: bar\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nTengine with enabled HTTPS and self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1/us\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1/us\n\n - name: remote\n   url: http://203.0.113.10/us\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `tengine` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m tengine\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m tengine -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `tengine` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep tengine\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep tengine /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep tengine\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tengine instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tengine.bandwidth_total | in, out | B/s |\n| tengine.connections_total | accepted | connections/s |\n| tengine.requests_total | processed | requests/s |\n| tengine.requests_per_response_code_family_total | 2xx, 3xx, 4xx, 5xx, other | requests/s |\n| tengine.requests_per_response_code_detailed_total | 200, 206, 302, 304, 403, 404, 419, 499, 500, 502, 503, 504, 508, other | requests/s |\n| tengine.requests_upstream_total | requests | requests/s |\n| tengine.tries_upstream_total | calls | calls/s |\n| tengine.requests_upstream_per_response_code_family_total | 4xx, 5xx | requests/s |\n\n",integration_type:"collector",id:"go.d.plugin-tengine-Tengine",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/tengine/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"go.d.plugin",module_name:"tomcat",monitored_instance:{name:"Tomcat",link:"https://tomcat.apache.org/",categories:["data-collection.web-servers-and-proxies"],icon_filename:"tomcat.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["apache","tomcat","webserver","websocket","jakarta","javaEE"]},overview:"# Tomcat\n\nPlugin: go.d.plugin\nModule: tomcat\n\n## Overview\n\nThis collector monitors Tomcat metrics about bandwidth, processing time, threads and more.\n\n\nIt parses the information provided by the [Server Status](https://tomcat.apache.org/tomcat-10.0-doc/manager-howto.html#Server_Status) HTTP endpoint.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nBy default, this Tomcat collector cannot access the server's status page. 
To enable data collection, you will need to configure access credentials with appropriate permissions.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf the Netdata Agent and Tomcat are on the same host, the collector will attempt to connect to the Tomcat server's status page at `http://localhost:8080/manager/status?XML=true`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **tomcat** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **tomcat**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/tomcat.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Access to Tomcat Status Endpoint\n\nThe Netdata Agent needs read-only access to the Tomcat status endpoint to collect data from the server.\n\nYou can achieve this by creating a dedicated user named `netdata` with read-only permissions specifically for accessing the [Server Status](https://tomcat.apache.org/tomcat-10.0-doc/manager-howto.html#Server_Status) endpoint.\n\nOnce you\'ve created the `netdata` user, you\'ll need to configure the username and password in the collector configuration file.\n
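\nA quick way to confirm the credentials work before pointing Netdata at them (the `netdata`/`PASSWORD` pair below is a placeholder for whatever you created above):\n\n```bash\n# request the same status page the collector parses; expect XML output, not a 401/403\ncurl -u netdata:PASSWORD "http://127.0.0.1:8080/manager/status?XML=true"\n```\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8080 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. 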
|  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **tomcat** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the tomcat data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _tomcat_ (or scroll the list) to locate the **tomcat** collector.\n5. Click the **+** next to the **tomcat** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/tomcat.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tomcat.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8080\n    username: John\n    password: Doe\n\n```\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8080\n    username: admin1\n    password: hackme1\n\n  - name: remote\n    url: http://192.0.2.1:8080\n    username: admin2\n    password: hackme2\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `tomcat` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m tomcat\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m tomcat -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `tomcat` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep tomcat\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep tomcat /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep tomcat\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tomcat instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tomcat.jvm_memory_usage | free, used | bytes |\n\n### Per jvm memory pool\n\nThese metrics refer to the JVM memory pool.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| mempool_name | Memory Pool name. |\n| mempool_type | Memory Pool type. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tomcat.jvm_mem_pool_memory_usage | commited, used, max | bytes |\n\n### Per connector\n\nThese metrics refer to the connector.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| connector_name | Connector name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tomcat.connector_requests | requests | requests/s |\n| tomcat.connector_bandwidth | received, sent | bytes/s |\n| tomcat.connector_requests_processing_time | processing_time | milliseconds |\n| tomcat.connector_errors | errors | errors/s |\n| tomcat.connector_request_threads | idle, busy | threads |\n\n",integration_type:"collector",id:"go.d.plugin-tomcat-Tomcat",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/tomcat/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-tor",plugin_name:"go.d.plugin",module_name:"tor",monitored_instance:{name:"Tor",link:"https://www.torproject.org/",categories:["data-collection.networking"],icon_filename:"tor.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["tor","traffic","vpn"]},overview:"# Tor\n\nPlugin: go.d.plugin\nModule: tor\n\n## Overview\n\nTracks Tor's download and upload traffic, as well as its uptime.\n\n\nIt reads the server's response to the [GETINFO](https://spec.torproject.org/control-spec/commands.html#getinfo) command.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Tor instances running on localhost that are listening on port 9051.\nOn startup, it tries to collect metrics from:\n\n- 127.0.0.1:9051\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **tor** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **tor**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/tor.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable Control Port\n\nEnable `ControlPort` in `/etc/tor/torrc`. If you protect the control port with a password, set the same password in the collector configuration.\n
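\nA minimal sketch of the torrc changes (the password and hash shown are examples; generate your own hash):\n\n```bash\n# print the hashed form of your control password (example password shown)\ntor --hash-password "somePassword"\n# add these two lines to /etc/tor/torrc, pasting the hash printed above:\n#   ControlPort 9051\n#   HashedControlPassword 16:...\n# then restart tor so the control port comes up\nsudo systemctl restart tor\n```\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection frequency. | 1 | no |\n|  | autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 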
| 0 | no |\n| **Target** | address | The IP address and port where the Tor Control Port listens for connections. | 127.0.0.1:9051 | yes |\n|  | timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| **Auth** | password | Password for authentication. |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **tor** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the tor data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _tor_ (or scroll the list) to locate the **tor** collector.\n5. Click the **+** next to the **tor** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/tor.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tor.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:9051\n    password: somePassword\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:9051\n    password: somePassword\n\n  - name: remote\n    address: 203.0.113.0:9051\n    password: somePassword\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `tor` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m tor\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m tor -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `tor` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep tor\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep tor /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep tor\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tor.traffic | read, write | KiB/s |\n| tor.uptime | uptime | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-tor-Tor",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/tor/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-traefik",plugin_name:"go.d.plugin",module_name:"traefik",monitored_instance:{name:"Traefik",link:"https://traefik.io/",icon_filename:"traefik.svg",categories:["data-collection.web-servers-and-proxies"]},keywords:["traefik","proxy","webproxy"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Traefik\n\nPlugin: go.d.plugin\nModule: traefik\n\n## Overview\n\nThis collector monitors Traefik servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **traefik** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **traefik**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/traefik.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable it, see the [Prometheus exporter](https://doc.traefik.io/traefik/observability/metrics/prometheus/) documentation.\n
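\nOnce the exporter is enabled, you can confirm the endpoint is reachable (assuming the default metrics entryPoint on port 8082, which matches the default URL below):\n\n```bash\ncurl -s http://127.0.0.1:8082/metrics | head\n```\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="All options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8082/metrics | yes |\n|  | timeout | HTTP request timeout (seconds). 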
| 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **traefik** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the traefik data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _traefik_ (or scroll the list) to locate the **traefik** collector.\n5. Click the **+** next to the **traefik** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/traefik.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/traefik.conf\n```\n\n##### Examples\n\n###### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8082/metrics\n\n```\n{% /details %}\n###### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8082/metrics\n    username: foo\n    password: bar\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8082/metrics\n\n  - name: remote\n    url: http://192.0.2.0:8082/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `traefik` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m traefik\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m traefik -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `traefik` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep traefik\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep traefik /var/log/netdata/collector.log\n```\n\n
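To follow new messages as they are written (same default log path), you can tail the file instead:\n\n```bash\ntail -f /var/log/netdata/collector.log | grep traefik\n```\n\n**Note**: This method shows logs from all restarts. 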
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep traefik\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per entrypoint, protocol\n\nThese metrics refer to the endpoint.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| traefik.entrypoint_requests | 1xx, 2xx, 3xx, 4xx, 5xx | requests/s |\n| traefik.entrypoint_request_duration_average | 1xx, 2xx, 3xx, 4xx, 5xx | milliseconds |\n| traefik.entrypoint_open_connections | a dimension per HTTP method | connections |\n\n",integration_type:"collector",id:"go.d.plugin-traefik-Traefik",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/traefik/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-typesense",plugin_name:"go.d.plugin",module_name:"typesense",monitored_instance:{name:"Typesense",link:"https://typesense.org/",categories:["data-collection.databases"],icon_filename:"typesense.svg"},related_resources:{integrations:{list:[]}},alternative_monitored_instances:[],info_provided_to_referring_integrations:{description:""},keywords:["typesense","search engine"]},overview:"# Typesense\n\nPlugin: go.d.plugin\nModule: typesense\n\n## Overview\n\nThis collector monitors the overall health status and performance of your Typesense servers.\nIt gathers detailed metrics, including the total number of requests processed, the breakdown of different request types, and the average latency experienced by each request.\n\n\nIt gathers metrics by periodically issuing HTTP GET requests to the Typesense server:\n\n- [/health](https://typesense.org/docs/27.0/api/cluster-operations.html#health) endpoint to check server health.\n- [/stats.json](https://typesense.org/docs/27.0/api/cluster-operations.html#api-stats) endpoint to collect data on requests and latency.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector can automatically detect Typesense instances running on:\n\n- localhost that are listening on port 8108\n- within Docker containers\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **typesense** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                 
                        | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **typesense**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/typesense.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### API Key Configuration\n\nWhile optional, configuring an [API key](https://typesense.org/docs/0.20.0/api/api-keys.html#api-keys) is highly recommended to enable the collector to gather [stats metrics](https://typesense.org/docs/27.0/api/cluster-operations.html#api-stats), including request counts and latency.\nWithout an API key, the collector will only collect health status information.\n\n> If you\'re running Typesense with the API key provided as a command-line parameter (e.g., `--api-key=XYZ`), Netdata can automatically detect and use this key for queries.\n> In this case, no additional configuration is required.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8108 | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | api_key | Typesense API key (`X-TYPESENSE-API-KEY`). See [API Keys](https://typesense.org/docs/0.20.0/api/api-keys.html#api-keys). |  | no |\n|  | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **typesense** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the typesense data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. 
The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _typesense_ (or scroll the list) to locate the **typesense** collector.\n5. Click the **+** next to the **typesense** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/typesense.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/typesense.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8108\n    api_key: XYZ\n\n```\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8108\n    api_key: XYZ\n\n  - name: remote\n    url: http://192.0.2.1:8108\n    api_key: XYZ\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `typesense` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m typesense\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m typesense -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `typesense` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep typesense\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep typesense /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep typesense\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Typesense instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| typesense.health_status | ok, out_of_disk, out_of_memory | status |\n| typesense.total_requests | requests | requests/s |\n| typesense.requests_by_operation | search, write, import, delete | requests/s |\n| typesense.latency_by_operation | search, write, import, delete | milliseconds |\n| typesense.overloaded_requests | overloaded | requests/s |\n\n",integration_type:"collector",id:"go.d.plugin-typesense-Typesense",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/typesense/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-unbound",plugin_name:"go.d.plugin",module_name:"unbound",monitored_instance:{name:"Unbound",link:"https://nlnetlabs.nl/projects/unbound/about/",icon_filename:"unbound.png",categories:["data-collection.networking"]},keywords:["unbound","dns"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Unbound\n\nPlugin: go.d.plugin\nModule: unbound\n\n## Overview\n\nThis collector monitors Unbound servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **unbound** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)   
  | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **unbound**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/unbound.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable remote control interface\n\nSet `control-enable` to yes in [unbound.conf](https://nlnetlabs.nl/documentation/unbound/unbound.conf).\n\n\n#### Check permissions and adjust if necessary\n\nIf using a Unix socket:\n\n- socket should be readable and writable by `netdata` user\n\nIf using an IP socket and TLS is disabled:\n\n- socket should be accessible via network\n\nIf TLS is enabled, in addition:\n\n- `control-key-file` should be readable by `netdata` user\n- `control-cert-file` should be readable by `netdata` user\n\nFor auto-detection parameters from `unbound.conf`:\n\n- `unbound.conf` should be readable by `netdata` user\n- if you have several configuration files (include feature) all of them should be readable by `netdata` user\n
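\nA quick way to verify the remote control interface from the Netdata host (assuming certificate-based control on the default port; `unbound-control-setup` generates the key and certificate files if they do not exist yet):\n\n```bash\n# generate the control key/certificate pair once, if not already present\nsudo unbound-control-setup\n# confirm the interface answers; expect a short status report\nsudo unbound-control status\n```\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection frequency. | 5 | no |\n|  | autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| **Target** | address | Server address in IP:PORT format. | 127.0.0.1:8953 | yes |\n|  | timeout | Connection/read/write/SSL handshake timeout. | 1 | no |\n|  | conf_path | Absolute path to the Unbound configuration file. Used to adjust behavior based on the `remote-control` section. | /etc/unbound/unbound.conf | no |\n| **Customization** | cumulative_stats | Statistics collection mode. Should match the `statistics-cumulative` parameter in the Unbound configuration file. | no | no |\n| **TLS** | use_tls | Whether to use TLS or not. | yes | no |\n|  | tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | yes | no |\n|  | tls_ca | Certificate authority that the client uses when verifying server certificates. |  | no |\n|  | tls_cert | Client TLS certificate. | /etc/unbound/unbound_control.pem | no |\n|  | tls_key | Client TLS key. | /etc/unbound/unbound_control.key | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **unbound** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the unbound data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _unbound_ (or scroll the list) to locate the **unbound** collector.\n5. Click the **+** next to the **unbound** collector to add a new job.\n6. 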
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/unbound.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/unbound.conf\n```\n\n##### Examples\n\n###### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:8953\n\n```\n{% /details %}\n###### Unix socket\n\nConnecting through Unix socket.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: socket\n    address: /var/run/unbound.sock\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:8953\n\n  - name: remote\n    address: 203.0.113.11:8953\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `unbound` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m unbound\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m unbound -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `unbound` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep unbound\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep unbound /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep unbound\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Unbound instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| unbound.queries | queries | queries |\n| unbound.queries_ip_ratelimited | ratelimited | queries |\n| unbound.dnscrypt_queries | crypted, cert, cleartext, malformed | queries |\n| unbound.cache | hits, miss | events |\n| unbound.cache_percentage | hits, miss | percentage |\n| unbound.prefetch | prefetches | prefetches |\n| unbound.expired | expired | replies |\n| unbound.zero_ttl_replies | zero_ttl | replies |\n| unbound.recursive_replies | recursive | replies |\n| unbound.recursion_time | avg, median | milliseconds |\n| unbound.request_list_usage | avg, max | queries |\n| unbound.current_request_list_usage | all, users | queries |\n| unbound.request_list_jostle_list | overwritten, dropped | queries |\n| unbound.tcpusage | usage | buffers |\n| unbound.uptime | time | seconds |\n| unbound.cache_memory | message, rrset, dnscrypt_nonce, dnscrypt_shared_secret | KB |\n| unbound.mod_memory | iterator, respip, validator, subnet, ipsec | KB |\n| unbound.mem_streamwait | streamwait | KB |\n| unbound.cache_count | infra, key, msg, rrset, dnscrypt_nonce, shared_secret | items |\n| unbound.type_queries | a dimension per query type | queries |\n| unbound.class_queries | a dimension per query class | queries |\n| unbound.opcode_queries | a dimension per query opcode | queries |\n| unbound.flag_queries | qr, aa, tc, rd, ra, z, ad, cd | queries |\n| unbound.rcode_answers | a dimension per reply rcode | replies |\n\n### Per thread\n\nThese metrics refer to threads.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| unbound.thread_queries | queries | queries |\n| unbound.thread_queries_ip_ratelimited | ratelimited | queries |\n| unbound.thread_dnscrypt_queries | crypted, cert, cleartext, malformed | queries |\n| unbound.thread_cache | hits, miss | events |\n| unbound.thread_cache_percentage | hits, miss | percentage |\n| unbound.thread_prefetch | prefetches | prefetches |\n| unbound.thread_expired | expired | replies |\n| unbound.thread_zero_ttl_replies | zero_ttl | replies |\n| unbound.thread_recursive_replies | recursive | replies |\n| unbound.thread_recursion_time | avg, median | milliseconds |\n| unbound.thread_request_list_usage | avg, max | 
queries |\n| unbound.thread_current_request_list_usage | all, users | queries |\n| unbound.thread_request_list_jostle_list | overwritten, dropped | queries |\n| unbound.thread_tcpusage | usage | buffers |\n\n",integration_type:"collector",id:"go.d.plugin-unbound-Unbound",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/unbound/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-upsd",plugin_name:"go.d.plugin",module_name:"upsd",monitored_instance:{name:"UPS (NUT)",link:"",icon_filename:"plug-circle-bolt.svg",categories:["data-collection.hardware-and-sensors"]},keywords:["ups","nut"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# UPS (NUT)\n\nPlugin: go.d.plugin\nModule: upsd\n\n## Overview\n\nThis collector monitors Uninterruptible Power Supplies by polling the UPS daemon using the NUT network protocol.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **upsd** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **upsd**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/upsd.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection frequency. | 1 | no |\n|  | autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| **Target** | address | UPS daemon address in IP:PORT format. | 127.0.0.1:3493 | yes |\n|  | timeout | Connection/read/write timeout in seconds. The timeout includes name resolution, if required. | 2 | no |\n| **Auth** | username | Username for authentication. |  | no |\n|  | password | Password for authentication. 
|  | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **upsd** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the upsd data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _upsd_ (or scroll the list) to locate the **upsd** collector.\n5. Click the **+** next to the **upsd** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/upsd.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/upsd.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:3493\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:3493\n\n  - name: remote\n    address: 203.0.113.0:3493\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `upsd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m upsd\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m upsd -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `upsd` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep upsd\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep upsd /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep upsd\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ upsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} average load over the last 10 minutes |\n| [ upsd_ups_battery_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_battery_charge | UPS ${label:ups_name} average battery charge over the last minute |\n| [ upsd_ups_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} number of seconds since the last successful data collection |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ups\n\nThese metrics refer to the UPS unit.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| ups_name | UPS name. |\n| battery_type | Battery type (chemistry). "battery.type" variable value. |\n| device_model | Device model. "device.model" variable value. |\n| device_serial | Device serial number. "device.serial" variable value. |\n| device_manufacturer | Device manufacturer. "device.mfr" variable value. |\n| device_type | Device type (ups, pdu, scd, psu, ats). "device.type" variable value. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| upsd.ups_load | load | percentage |\n| upsd.ups_load_usage | load_usage | Watts |\n| upsd.ups_status | on_line, on_battery, low_battery, high_battery, replace_battery, charging, discharging, bypass, calibration, offline, overloaded, trim_input_voltage, boost_input_voltage, forced_shutdown, other | status |\n| upsd.ups_temperature | temperature | Celsius |\n| upsd.ups_battery_charge | charge | percentage |\n| upsd.ups_battery_estimated_runtime | runtime | seconds |\n| upsd.ups_battery_voltage | voltage | Volts |\n| upsd.ups_battery_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_input_voltage | voltage | Volts |\n| upsd.ups_input_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_input_current | current | Ampere |\n| upsd.ups_input_current_nominal | nominal_current | Ampere |\n| upsd.ups_input_frequency | frequency | Hz |\n| upsd.ups_input_frequency_nominal | nominal_frequency | Hz |\n| upsd.ups_output_voltage | voltage | Volts |\n| upsd.ups_output_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_output_current | current | Ampere |\n| upsd.ups_output_current_nominal | nominal_current | Ampere |\n| upsd.ups_output_frequency | frequency | Hz |\n| upsd.ups_output_frequency_nominal | nominal_frequency | Hz |\n\n',integration_type:"collector",id:"go.d.plugin-upsd-UPS_(NUT)",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/upsd/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-uwsgi",plugin_name:"go.d.plugin",module_name:"uwsgi",monitored_instance:{name:"uWSGI",link:"https://uwsgi-docs.readthedocs.io/en/latest/",categories:["data-collection.web-servers-and-proxies"],icon_filename:"uwsgi.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["application server","python","web applications"]},overview:"# uWSGI\n\nPlugin: go.d.plugin\nModule: uwsgi\n\n## Overview\n\nMonitors UWSGI worker health and performance by collecting metrics like requests, transmitted data, exceptions, and harakiris.\n\n\nIt fetches [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) statistics over TCP.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAutomatically discovers and collects UWSGI statistics from the following default locations:\n\n- localhost:1717\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **uwsgi** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                              
           | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **uwsgi**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/uwsgi.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Enable the uWSGI Stats Server\n\nSee [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) for details.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection frequency. | 1 | no |\n|  | autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| **Target** | address | The IP address and port where the UWSGI [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) listens for connections. | 127.0.0.1:1717 | yes |\n|  | timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **uwsgi** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the uwsgi data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _uwsgi_ (or scroll the list) to locate the **uwsgi** collector.\n5. Click the **+** next to the **uwsgi** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/uwsgi.conf`.\n\nThe file format is YAML. 
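As a quick orientation before the generic structure shown below, here is a sketch of a complete job definition using options from the table above (the job name and values are illustrative, not required defaults):\n\n```yaml\n# Illustrative job: adjust address and timeout to match your Stats Server.\njobs:\n  - name: uwsgi_local\n    address: 127.0.0.1:1717\n    timeout: 2\n    update_every: 5\n```\n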
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/uwsgi.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:1717\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:1717\n\n  - name: remote\n    address: 203.0.113.0:1717\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `uwsgi` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m uwsgi\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m uwsgi -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `uwsgi` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep uwsgi\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep uwsgi /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep uwsgi\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
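For example, you can inspect the raw per-worker data this collector reads by querying the Stats Server yourself. A minimal sketch, assuming the default `127.0.0.1:1717` listener and that `nc` and `jq` are installed (the Stats Server emits a JSON document over a plain TCP connection; the exact key set depends on your uWSGI version):\n\n```bash\n# Dump each worker's id, status, and request count from the uWSGI Stats Server.\nnc 127.0.0.1 1717 | jq '.workers[] | {id, status, requests}'\n```\n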
An instance is uniquely identified by a set of labels.\n\n\n\n### Per uWSGI instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| uwsgi.transmitted_data | tx | bytes/s |\n| uwsgi.requests | requests | requests/s |\n| uwsgi.harakiris | harakiris | harakiris/s |\n| uwsgi.respawns | respawns | respawns/s |\n\n### Per worker\n\nThese metrics refer to the Worker process.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| worker_id | Worker ID. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| uwsgi.worker_transmitted_data | tx | bytes/s |\n| uwsgi.worker_requests | requests | requests/s |\n| uwsgi.worker_delta_requests | delta_requests | requests/s |\n| uwsgi.worker_average_request_time | avg | milliseconds |\n| uwsgi.worker_harakiris | harakiris | harakiris/s |\n| uwsgi.worker_exceptions | exceptions | exceptions/s |\n| uwsgi.worker_status | idle, busy, cheap, pause, sig | status |\n| uwsgi.worker_request_handling_status | accepting, not_accepting | status |\n| uwsgi.worker_respawns | respawns | respawns/s |\n| uwsgi.worker_memory_rss | rss | bytes |\n| uwsgi.worker_memory_vsz | vsz | bytes |\n\n",integration_type:"collector",id:"go.d.plugin-uwsgi-uWSGI",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/uwsgi/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"go.d.plugin",module_name:"varnish",monitored_instance:{name:"Varnish",link:"https://varnish-cache.org/",categories:["data-collection.web-servers-and-proxies"],icon_filename:"varnish.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["varnish","varnishstat","varnishd","cache","web server","web cache"]},overview:"# Varnish\n\nPlugin: go.d.plugin\nModule: varnish\n\n## Overview\n\nThis collector monitors Varnish instances, supporting both the open-source Varnish-Cache and the commercial Varnish-Plus.\n\nIt tracks key performance metrics, along with detailed statistics for Backends (VBE) and Storages (SMF, SMA, MSE).\n\nIt relies on the [`varnishstat`](https://varnish-cache.org/docs/trunk/reference/varnishstat.html) CLI tool but avoids directly executing the binary.\nInstead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.\nThis approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAutomatically detects and monitors Varnish instances running on the host or inside Docker containers.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **varnish** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 
|\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **varnish**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/varnish.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection frequency. | 10 | no |\n|  | timeout | Timeout for executing the binary, specified in seconds. | 2 | no |\n| **Target** | instance_name | Specifies the name of the Varnish instance to collect metrics from. This corresponds to the `-n` argument used with the [varnishstat](https://varnish-cache.org/docs/trunk/reference/varnishstat.html) command. |  | no |\n|  | docker_container | Specifies the name of the Docker container where the Varnish instance is running. If set, the `varnishstat` command will be executed within this container. |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **varnish** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the varnish data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _varnish_ (or scroll the list) to locate the **varnish** collector.\n5. Click the **+** next to the **varnish** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/varnish.conf`.\n\nThe file format is YAML. 
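As a quick orientation before the generic structure shown below, here is a sketch of a job using options from the table above (the container name is hypothetical; set `docker_container` only if your Varnish runs inside Docker):\n\n```yaml\n# Illustrative job: collect from a Varnish instance running in a Docker container.\njobs:\n  - name: varnish_docker\n    docker_container: my-varnish\n    update_every: 10\n```\n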
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/varnish.conf\n```\n\n##### Examples\n\n###### Custom update_every\n\nAllows you to override the default data collection interval.\n\n```yaml\njobs:\n  - name: varnish\n    update_every: 5\n\n```\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `varnish` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m varnish\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m varnish -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `varnish` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep varnish\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep varnish /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep varnish\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
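For example, the per-Backend scope below is built from Varnish's `VBE.*` counters, which you can inspect directly. A sketch, assuming `varnishstat` is in your `PATH`, you have permission to read the Varnish shared memory, and your `varnishstat` version accepts glob patterns with `-f`:\n\n```bash\n# Print backend (VBE) counters once, in plain text.\nvarnishstat -1 -f 'VBE.*'\n```\n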
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Varnish instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.client_session_connections | accepted, dropped | connections/s |\n| varnish.client_requests | received | requests/s |\n| varnish.cache_hit_ratio_total | hit, miss, hitpass, hitmiss | percent |\n| varnish.cache_hit_ratio_delta | hit, miss, hitpass, hitmiss | percent |\n| varnish.cache_expired_objects | expired | objects/s |\n| varnish.cache_lru_activity | nuked, moved | objects/s |\n| varnish.threads | threads | threads |\n| varnish.thread_management_activity | created, failed, destroyed, limited | threads/s |\n| varnish.thread_queue_len | queue_length | threads |\n| varnish.backends_requests | sent | requests/s |\n| varnish.esi_parsing_issues | errors, warnings | issues/s |\n| varnish.mgmt_process_uptime | uptime | seconds |\n| varnish.child_process_uptime | uptime | seconds |\n\n### Per Backend\n\nThese metrics refer to the Backend (VBE).\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.backend_data_transfer | req_header, req_body, resp_header, resp_body | bytes/s |\n\n### Per Storage\n\nThese metrics refer to the Storage (SMA, SMF, MSE).\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.storage_space_usage | free, used | bytes |\n| varnish.storage_allocated_objects | allocated | objects |\n\n",integration_type:"collector",id:"go.d.plugin-varnish-Varnish",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/varnish/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-vcsa",plugin_name:"go.d.plugin",module_name:"vcsa",monitored_instance:{name:"vCenter Server Appliance",link:"https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vcsa.doc/GUID-223C2821-BD98-4C7A-936B-7DBE96291BA4.html",icon_filename:"vmware.svg",categories:["data-collection.containers-and-vms"]},keywords:["vmware"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# vCenter Server Appliance\n\nPlugin: go.d.plugin\nModule: vcsa\n\n## Overview\n\nThis collector monitors [health statistics](https://developer.vmware.com/apis/vsphere-automation/latest/appliance/health/) of vCenter Server Appliance servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **vcsa** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 
|\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **vcsa**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/vcsa.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 5 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 5 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | yes |\n|  | password | Password for Basic HTTP authentication. |  | yes |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **vcsa** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the vcsa data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _vcsa_ (or scroll the list) to locate the **vcsa** collector.\n5. Click the **+** next to the **vcsa** collector to add a new job.\n6. 
Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/vcsa.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vcsa.conf\n```\n\n##### Examples\n\n###### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: vcsa1\n    url: https://203.0.113.1\n    username: admin@vsphere.local\n    password: password\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nTwo instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: vcsa1\n    url: https://203.0.113.1\n    username: admin@vsphere.local\n    password: password\n\n  - name: vcsa2\n    url: https://203.0.113.10\n    username: admin@vsphere.local\n    password: password\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `vcsa` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m vcsa\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m vcsa -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `vcsa` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep vcsa\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep vcsa /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep vcsa\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ vcsa_system_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is orange. One or more components are degraded. |\n| [ vcsa_system_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is red. One or more components are unavailable or will stop functioning soon. |\n| [ vcsa_applmgmt_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_applmgmt_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_load_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_load_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_mem_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_mem_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_swap_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_swap_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_database_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is orange. It is degraded, and may have serious problems. 
|\n| [ vcsa_database_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_software_packages_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.software_packages_health_status | VCSA software packages security updates are available. |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vCenter Server Appliance instance\n\nThese metrics refer to the entire monitored application.\n<details>\n<summary>See health statuses</summary>\nOverall System Health:\n\n| Status  | Description                                                                                                              |\n|:-------:|:-------------------------------------------------------------------------------------------------------------------------|\n|  green  | All components in the appliance are healthy.                                                                             |\n| yellow  | One or more components in the appliance might become overloaded soon.                                                    |\n| orange  | One or more components in the appliance might be degraded.                                                               |\n|   red   | One or more components in the appliance might be in an unusable status and the appliance might become unresponsive soon. |\n|  gray   | No health data is available.                                                                                             |\n| unknown | Collector failed to decode status.                                                                                       |\n\nComponents Health:\n\n| Status  | Description                                                  |\n|:-------:|:-------------------------------------------------------------|\n|  green  | The component is healthy.                                    |\n| yellow  | The component is healthy, but may have some problems.        |\n| orange  | The component is degraded, and may have serious problems.    |\n|   red   | The component is unavailable, or will stop functioning soon. |\n|  gray   | No health data is available.                                 |\n| unknown | Collector failed to decode status.                           |\n\nSoftware Updates Health:\n\n| Status  | Description                                          |\n|:-------:|:-----------------------------------------------------|\n|  green  | No updates available.                                |\n| orange  | Non-security patches might be available.             |\n|   red   | Security patches might be available.                 |\n|  gray   | An error retrieving information on software updates. |\n| unknown | Collector failed to decode status.                   
|\n\n</details>\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vcsa.system_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.applmgmt_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.load_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.mem_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.swap_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.database_storage_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.storage_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.software_packages_health_status | green, red, orange, gray, unknown | status |\n\n",integration_type:"collector",id:"go.d.plugin-vcsa-vCenter_Server_Appliance",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/vcsa/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-vernemq",plugin_name:"go.d.plugin",module_name:"vernemq",monitored_instance:{name:"VerneMQ",link:"https://vernemq.com",icon_filename:"vernemq.svg",categories:["data-collection.databases"]},keywords:["vernemq","message brokers"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# VerneMQ\n\nPlugin: go.d.plugin\nModule: vernemq\n\n## Overview\n\nThis collector monitors VerneMQ instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **vernemq** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **vernemq**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/vernemq.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:8888/metrics | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **vernemq** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the vernemq data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _vernemq_ (or scroll the list) to locate the **vernemq** collector.\n5. Click the **+** next to the **vernemq** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/vernemq.conf`.\n\nThe file format is YAML. 
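As a quick orientation before the generic structure shown below, here is a sketch of a job for an HTTPS metrics endpoint with a self-signed certificate (the URL and port are illustrative):\n\n```yaml\n# Illustrative job: TLS endpoint, skipping certificate verification.\njobs:\n  - name: local_tls\n    url: https://127.0.0.1:8889/metrics\n    tls_skip_verify: yes\n```\n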
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vernemq.conf\n```\n\n##### Examples\n\n###### Basic\n\nAn example configuration.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8888/metrics\n\n```\n{% /details %}\n###### HTTP authentication\n\nLocal instance with basic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8888/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:8888/metrics\n\n  - name: remote\n    url: http://203.0.113.10:8888/metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `vernemq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m vernemq\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m vernemq -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `vernemq` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep vernemq\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep vernemq /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep vernemq\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ vernemq_socket_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_socket_errors | Node ${label:node} socket errors in the last minute |\n| [ vernemq_queue_message_drop ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_queue_undelivered_messages | Node ${label:node} dropped messages due to full queues in the last minute |\n| [ vernemq_queue_message_expired ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_queue_undelivered_messages | Node ${label:node} messages expired before delivery in the last minute |\n| [ vernemq_queue_message_unhandled ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_queue_undelivered_messages | Node ${label:node} unhandled messages in the last minute |\n| [ vernemq_average_scheduler_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_average_scheduler_utilization | Node ${label:node} scheduler utilization over the last 10 minutes |\n| [ vernemq_cluster_dropped ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_cluster_dropped | Node ${label:node} traffic dropped during communication with the cluster nodes in the last minute |\n| [ vernemq_netsplits ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_netsplits | Node ${label:node} detected netsplits (split brain) in the last minute |\n| [ vernemq_mqtt_connack_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_connack_sent_by_reason_code | Node ${label:node} sent unsuccessful v5 CONNACK packets in the last minute |\n| [ vernemq_mqtt_disconnect_received_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_disconnect_received_by_reason_code | Node ${label:node} received non-normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_disconnect_sent_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_disconnect_sent_by_reason_code | Node ${label:node} sent non-normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_subscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_subscribe_error | Node ${label:node} mqtt v${label:mqtt_version} failed SUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_subscribe_auth_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_subscribe_auth_error | Node ${label:node} mqtt v${label:mqtt_version} unauthorized SUBSCRIBE attempts in the last minute |\n| [ vernemq_mqtt_unsubscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_unsubscribe_error | Node ${label:node} mqtt v${label:mqtt_version} failed UNSUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_publish_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_publish_errors | Node ${label:node} mqtt v${label:mqtt_version} failed PUBLISH operations in the last minute |\n| [ vernemq_mqtt_publish_auth_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_publish_auth_errors | Node ${label:node} mqtt v${label:mqtt_version} unauthorized PUBLISH attempts in the last minute |\n| [ vernemq_mqtt_puback_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_puback_received_by_reason_code | Node ${label:node} mqtt v5 received unsuccessful PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_puback_sent_by_reason_code | Node ${label:node} mqtt v5 sent unsuccessful PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_puback_invalid_error | Node ${label:node} mqtt v${label:mqtt_version} received unexpected PUBACK messages in the last minute |\n| [ vernemq_mqtt_pubrec_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrec_received_by_reason_code | Node ${label:node} mqtt v5 received unsuccessful PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrec_sent_by_reason_code | Node ${label:node} mqtt v5 sent unsuccessful PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_invalid_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrec_invalid_error | Node ${label:node} mqtt v${label:mqtt_version} received invalid PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrel_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrel_received_by_reason_code | Node ${label:node} mqtt v5 received unsuccessful PUBREL packets in the last minute |\n| [ vernemq_mqtt_pubrel_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubrel_sent_by_reason_code | Node ${label:node} mqtt v5 sent unsuccessful PUBREL packets in the last minute |\n| [ vernemq_mqtt_pubcomp_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubcomp_received_by_reason_code | Node ${label:node} mqtt v5 received unsuccessful PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubcomp_sent_by_reason_code | Node ${label:node} mqtt v5 sent unsuccessful PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.node_mqtt_pubcomp_invalid_error | Node ${label:node} mqtt v${label:mqtt_version} received unexpected PUBCOMP packets in the last minute |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines 
the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the VerneMQ node.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | The value of this label is identical to the value of the "node" label exposed by VerneMQ. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vernemq.node_socket | open | sockets |\n| vernemq.node_socket_operations | open, close | sockets/s |\n| vernemq.node_client_keepalive_expired | closed | sockets/s |\n| vernemq.node_socket_close_timeout | closed | sockets/s |\n| vernemq.node_socket_errors | errors | errors/s |\n| vernemq.node_queue_processes | queue_processes | queue processes |\n| vernemq.node_queue_processes_operations | setup, teardown | events/s |\n| vernemq.node_queue_process_init_from_storage | queue_processes | queue processes/s |\n| vernemq.node_queue_messages | received, sent | messages/s |\n| vernemq.node_queued_messages | queued | messages |\n| vernemq.node_queue_undelivered_messages | dropped, expired, unhandled | messages/s |\n| vernemq.node_router_subscriptions | subscriptions | subscriptions |\n| vernemq.node_router_matched_subscriptions | local, remote | subscriptions/s |\n| vernemq.node_router_memory | used | bytes |\n| vernemq.node_average_scheduler_utilization | utilization | percentage |\n| vernemq.node_system_processes | processes | processes |\n| vernemq.node_system_reductions | reductions | ops/s |\n| vernemq.node_system_context_switches | context_switches | ops/s |\n| vernemq.node_system_io | received, sent | bytes/s |\n| vernemq.node_system_run_queue | ready | processes |\n| vernemq.node_system_gc_count | gc | ops/s |\n| vernemq.node_system_gc_words_reclaimed | words_reclaimed | ops/s |\n| vernemq.node_system_allocated_memory | processes, system | bytes |\n| vernemq.node_traffic | received, sent | bytes/s |\n| vernemq.node_retain_messages | messages | messages |\n| vernemq.node_retain_memory | used | bytes |\n| vernemq.node_cluster_traffic | received, sent | bytes/s |\n| vernemq.node_cluster_dropped | dropped | bytes/s |\n| vernemq.node_netsplit_unresolved | unresolved | netsplits |\n| vernemq.node_netsplits | resolved, detected | netsplits/s |\n| vernemq.node_uptime | time | seconds |\n\n### Per mqtt\n\nThese metrics are specific to the used MQTT protocol version.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | The value of this label is identical to the value of the "node" label exposed by VerneMQ. |\n| mqtt_version | MQTT version. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vernemq.node_mqtt_auth | received, sent | packets/s |\n| vernemq.node_mqtt_auth_received_by_reason_code | success, continue_authentication, reauthenticate | packets/s |\n| vernemq.node_mqtt_auth_sent_by_reason_code | success, continue_authentication, reauthenticate | packets/s |\n| vernemq.node_mqtt_connect | connect, connack | packets/s |\n| vernemq.node_mqtt_connack_sent_by_return_code | success, unsupported_protocol_version, client_identifier_not_valid, server_unavailable, bad_username_or_password, not_authorized | packets/s |\n| vernemq.node_mqtt_connack_sent_by_reason_code | success, unspecified_error, malformed_packet, protocol_error, impl_specific_error, unsupported_protocol_version, client_identifier_not_valid, bad_username_or_password, not_authorized, server_unavailable, server_busy, banned, bad_authentication_method, topic_name_invalid, packet_too_large, quota_exceeded, payload_format_invalid, retain_not_supported, qos_not_supported, use_another_server, server_moved, connection_rate_exceeded | packets/s |\n| vernemq.node_mqtt_disconnect | received, sent | packets/s |\n| vernemq.node_mqtt_disconnect_received_by_reason_code | normal_disconnect, disconnect_with_will_msg, unspecified_error, malformed_packet, protocol_error, impl_specific_error, topic_name_invalid, receive_max_exceeded, topic_alias_invalid, packet_too_large, message_rate_too_high, quota_exceeded, administrative_action, payload_format_invalid | packets/s |\n| vernemq.node_mqtt_disconnect_sent_by_reason_code | normal_disconnect, unspecified_error, malformed_packet, protocol_error, impl_specific_error, not_authorized, server_busy, server_shutting_down, keep_alive_timeout, session_taken_over, topic_filter_invalid, topic_name_invalid, receive_max_exceeded, topic_alias_invalid, packet_too_large, message_rate_too_high, quota_exceeded, administrative_action, payload_format_invalid, retain_not_supported, qos_not_supported, use_another_server, server_moved, shared_subs_not_supported, connection_rate_exceeded, max_connect_time, subscription_ids_not_supported, wildcard_subs_not_supported | packets/s |\n| vernemq.node_mqtt_subscribe | subscribe, suback | packets/s |\n| vernemq.node_mqtt_subscribe_error | subscribe | errors/s |\n| vernemq.node_mqtt_subscribe_auth_error | subscribe_auth | errors/s |\n| vernemq.node_mqtt_unsubscribe | unsubscribe, unsuback | packets/s |\n| vernemq.node_mqtt_unsubscribe_error | unsubscribe | errors/s |\n| vernemq.node_mqtt_publish | received, sent | packets/s |\n| vernemq.node_mqtt_publish_errors | publish | errors/s |\n| vernemq.node_mqtt_publish_auth_errors | publish_auth | errors/s |\n| vernemq.node_mqtt_puback | received, sent | packets/s |\n| vernemq.node_mqtt_puback_received_by_reason_code | success, no_matching_subscribers, unspecified_error, impl_specific_error, not_authorized, topic_name_invalid, packet_id_in_use, quota_exceeded, payload_format_invalid | packets/s |\n| vernemq.node_mqtt_puback_sent_by_reason_code | success, no_matching_subscribers, unspecified_error, impl_specific_error, not_authorized, topic_name_invalid, packet_id_in_use, quota_exceeded, payload_format_invalid | packets/s |\n| vernemq.node_mqtt_puback_invalid_error | unexpected | messages/s |\n| vernemq.node_mqtt_pubrec | received, sent | packets/s |\n| vernemq.node_mqtt_pubrec_received_by_reason_code | success, no_matching_subscribers, unspecified_error, impl_specific_error, not_authorized, topic_name_invalid, packet_id_in_use, quota_exceeded, 
payload_format_invalid | packets/s |\n| vernemq.node_mqtt_pubrec_sent_by_reason_code | success, no_matching_subscribers, unspecified_error, impl_specific_error, not_authorized, topic_name_invalid, packet_id_in_use, quota_exceeded, payload_format_invalid | packets/s |\n| vernemq.node_mqtt_pubrec_invalid_error | unexpected | messages/s |\n| vernemq.node_mqtt_pubrel | received, sent | packets/s |\n| vernemq.node_mqtt_pubrel_received_by_reason_code | success, packet_id_not_found | packets/s |\n| vernemq.node_mqtt_pubrel_sent_by_reason_code | success, packet_id_not_found | packets/s |\n| vernemq.node_mqtt_pubcomp | received, sent | packets/s |\n| vernemq.node_mqtt_pubcomp_received_by_reason_code | success, packet_id_not_found | packets/s |\n| vernemq.node_mqtt_pubcomp_sent_by_reason_code | success, packet_id_not_found | packets/s |\n| vernemq.node_mqtt_pubcomp_invalid_error | unexpected | messages/s |\n| vernemq.node_mqtt_ping | pingreq, pingresp | packets/s |\n\n',integration_type:"collector",id:"go.d.plugin-vernemq-VerneMQ",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/vernemq/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-vsphere",plugin_name:"go.d.plugin",module_name:"vsphere",monitored_instance:{name:"VMware vCenter Server",link:"https://www.vmware.com/products/vcenter-server.html",icon_filename:"vmware.svg",categories:["data-collection.containers-and-vms"]},keywords:["vmware","esxi","vcenter"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# VMware vCenter Server\n\nPlugin: go.d.plugin\nModule: vsphere\n\n## Overview\n\nThis collector monitors host and VM performance statistics from `vCenter` servers.\n\n> **Warning**: The `vsphere` collector cannot re-login and continue collecting metrics after a vCenter reboot.\n> go.d.plugin needs to be restarted.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default `update_every` is 20 seconds, and it doesn't make sense to decrease the value:\n**VMware real-time statistics are generated at a 20-second granularity**.\n\nFor big installations, 20 seconds is likely not enough, and the value should be tuned.\n\nTo get a better picture, we recommend running the collector in debug mode to see how long collecting metrics takes.\n\n<details>\n<summary>Example (all unrelated debug lines were removed)</summary>\n\n```\n[ilyam@pc]$ ./go.d.plugin -d -m vsphere\n[ DEBUG ] vsphere[vsphere] discover.go:94 discovering : starting resource discovering process\n[ DEBUG ] vsphere[vsphere] discover.go:102 discovering : found 3 dcs, process took 49.329656ms\n[ DEBUG ] vsphere[vsphere] discover.go:109 discovering : found 12 folders, process took 49.538688ms\n[ DEBUG ] vsphere[vsphere] discover.go:116 discovering : found 3 clusters, process took 47.722692ms\n[ DEBUG ] vsphere[vsphere] discover.go:123 discovering : found 2 hosts, process took 52.966995ms\n[ DEBUG ] vsphere[vsphere] discover.go:130 discovering : found 2 vms, process took 49.832979ms\n[ INFO  ] vsphere[vsphere] discover.go:140 discovering : found 3 dcs, 12 folders, 3 clusters (2 
dummy), 2 hosts, 3 vms, process took 249.655993ms\n[ DEBUG ] vsphere[vsphere] build.go:12 discovering : building : starting building resources process\n[ INFO  ] vsphere[vsphere] build.go:23 discovering : building : built 3/3 dcs, 12/12 folders, 3/3 clusters, 2/2 hosts, 3/3 vms, process took 63.3\xb5s\n[ DEBUG ] vsphere[vsphere] hierarchy.go:10 discovering : hierarchy : start setting resources hierarchy process\n[ INFO  ] vsphere[vsphere] hierarchy.go:18 discovering : hierarchy : set 3/3 clusters, 2/2 hosts, 3/3 vms, process took 6.522\xb5s\n[ DEBUG ] vsphere[vsphere] filter.go:24 discovering : filtering : starting filtering resources process\n[ DEBUG ] vsphere[vsphere] filter.go:45 discovering : filtering : removed 0 unmatched hosts\n[ DEBUG ] vsphere[vsphere] filter.go:56 discovering : filtering : removed 0 unmatched vms\n[ INFO  ] vsphere[vsphere] filter.go:29 discovering : filtering : filtered 0/2 hosts, 0/3 vms, process took 42.973\xb5s\n[ DEBUG ] vsphere[vsphere] metric_lists.go:14 discovering : metric lists : starting resources metric lists collection process\n[ INFO  ] vsphere[vsphere] metric_lists.go:30 discovering : metric lists : collected metric lists for 2/2 hosts, 3/3 vms, process took 275.60764ms\n[ INFO  ] vsphere[vsphere] discover.go:74 discovering : discovered 2/2 hosts, 3/3 vms, the whole process took 525.614041ms\n[ INFO  ] vsphere[vsphere] discover.go:11 starting discovery process, will do discovery every 5m0s\n[ DEBUG ] vsphere[vsphere] collect.go:11 starting collection process\n[ DEBUG ] vsphere[vsphere] scrape.go:48 scraping : scraped metrics for 2/2 hosts, process took 96.257374ms\n[ DEBUG ] vsphere[vsphere] scrape.go:60 scraping : scraped metrics for 3/3 vms, process took 57.879697ms\n[ DEBUG ] vsphere[vsphere] collect.go:23 metrics collected, process took 154.77997ms\n```\n\n</details>\n\nHere you can see that discovery took `525.614041ms` and collecting metrics took `154.77997ms`. Discovery runs in a separate thread, so it does not affect collection.\nThe `update_every` and `timeout` parameters should be adjusted based on these numbers.\n\n
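For example, if a debug run like the one above shows collection finishing in roughly 150ms, the default 20-second interval is fine; for a large installation where collection takes several seconds, a hypothetical tuned job might look like this (a sketch; the interval and timeout values are assumptions to adapt to your own numbers):\n\n```yaml\njobs:\n  - name: vcenter1\n    url: https://203.0.113.1\n    username: admin@vsphere.local\n    password: somepassword\n    update_every: 60   # assumed: relaxed interval for a large installation\n    timeout: 20        # assumed: allow slower scrapes to complete\n```\n\n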
",setup:"## Setup\n\n\nYou can configure the **vsphere** collector in two ways:\n\n| Method | Best for | How to |\n|---|---|---|\n| [**UI**](#via-ui) | Fast setup without editing files | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **vsphere**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/vsphere.conf` and add a job. |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 20 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | vCenter server URL. |  | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **Discovery** | discovery_interval | Hosts and VMs discovery interval (seconds). | 300 | no |\n| **Filters** | [host_include](#option-filters-host-include) | Hosts selector (filter). | /* | no |\n|  | [vm_include](#option-filters-vm-include) | VM selector (filter). | /* | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | yes |\n|  | password | Password for Basic HTTP authentication. |  | yes |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). 
|  | no |\n\n<a id=\"option-filters-host-include\"></a>\n##### host_include\n\nMetrics of hosts matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern\".\n- Match pattern syntax: [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n  ```yaml\n  host_include:\n    - '/DC1/*'           # all hosts from datacenter DC1\n    - '/DC2/*/!Host2 *'  # all hosts from datacenter DC2 except HOST2\n    - '/DC3/Cluster3/*'  # all hosts from DC3, cluster Cluster3\n  ```\n\n\n<a id=\"option-filters-vm-include\"></a>\n##### vm_include\n\nMetrics of VMs matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern/VM pattern\".\n- Match pattern syntax: [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n  ```yaml\n  vm_include:\n    - '/DC1/*'           # all VMs from datacenter DC1\n    - '/DC2/*/*/!VM2 *'  # all VMs from DC2 except VM2\n    - '/DC3/Cluster3/*'  # all VMs from DC3, cluster Cluster3\n  ```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **vsphere** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the vsphere data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _vsphere_ (or scroll the list) to locate the **vsphere** collector.\n5. Click the **+** next to the **vsphere** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/vsphere.conf`.\n\nThe file format is YAML. 
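\n\nAs an illustration of the discovery filters described above, a hypothetical job restricted to a single datacenter might look like this (a sketch; the names and patterns are assumptions to adapt):\n\n```yaml\njobs:\n  - name: vcenter1\n    url: https://203.0.113.1\n    username: admin@vsphere.local\n    password: somepassword\n    host_include:\n      - '/DC1/*'       # assumed: only hosts from datacenter DC1\n    vm_include:\n      - '/DC1/*/*/*'   # assumed: only VMs from datacenter DC1\n```\n\n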
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vsphere.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name     : vcenter1\n    url      : https://203.0.113.1\n    username : admin@vsphere.local\n    password : somepassword\n\n```\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n  - name     : vcenter1\n    url      : https://203.0.113.1\n    username : admin@vsphere.local\n    password : somepassword\n\n  - name     : vcenter2\n    url      : https://203.0.113.10\n    username : admin@vsphere.local\n    password : somepassword\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `vsphere` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m vsphere\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m vsphere -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `vsphere` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep vsphere\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep vsphere /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep vsphere\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ vsphere_vm_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_cpu_utilization | Virtual Machine CPU utilization |\n| [ vsphere_vm_mem_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_mem_utilization | Virtual Machine memory utilization |\n| [ vsphere_host_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_cpu_utilization | ESXi Host CPU utilization |\n| [ vsphere_host_mem_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_mem_utilization | ESXi Host memory utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per virtual machine\n\nThese metrics refer to the Virtual Machine.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| datacenter | Datacenter name |\n| cluster | Cluster name |\n| host | Host name |\n| vm | Virtual Machine name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vsphere.vm_cpu_utilization | used | percentage |\n| vsphere.vm_mem_utilization | used | percentage |\n| vsphere.vm_mem_usage | granted, consumed, active, shared | KiB |\n| vsphere.vm_mem_swap_usage | swapped | KiB |\n| vsphere.vm_mem_swap_io | in, out | KiB/s |\n| vsphere.vm_disk_io | read, write | KiB/s |\n| vsphere.vm_disk_max_latency | latency | milliseconds |\n| vsphere.vm_net_traffic | received, sent | KiB/s |\n| vsphere.vm_net_packets | received, sent | packets |\n| vsphere.vm_net_drops | received, sent | packets |\n| vsphere.vm_overall_status | green, red, yellow, gray | status |\n| vsphere.vm_system_uptime | uptime | seconds |\n\n### Per host\n\nThese metrics refer to the ESXi host.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| datacenter | Datacenter name |\n| cluster | Cluster name |\n| host | Host name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vsphere.host_cpu_utilization | used | percentage |\n| vsphere.host_mem_utilization | used | percentage |\n| vsphere.host_mem_usage | granted, consumed, active, shared, sharedcommon | KiB |\n| vsphere.host_mem_swap_io | in, out | KiB/s |\n| vsphere.host_disk_io | read, write | KiB/s |\n| vsphere.host_disk_max_latency | latency | milliseconds |\n| vsphere.host_net_traffic | received, sent | KiB/s |\n| vsphere.host_net_packets | received, sent | packets |\n| vsphere.host_net_drops | received, sent | packets |\n| vsphere.host_net_errors | received, sent | errors |\n| vsphere.host_overall_status | green, red, yellow, gray | status |\n| vsphere.host_system_uptime | uptime | seconds 
|\n\n",integration_type:"collector",id:"go.d.plugin-vsphere-VMware_vCenter_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/vsphere/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"go.d.plugin",module_name:"w1sensor",monitored_instance:{name:"1-Wire Sensors",link:"https://www.analog.com/en/product-category/1wire-temperature-sensors.html",categories:["data-collection.hardware-and-sensors"],icon_filename:"1-wire.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["temperature","sensor","1-wire"]},overview:"# 1-Wire Sensors\n\nPlugin: go.d.plugin\nModule: w1sensor\n\n## Overview\n\nMonitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts.\n\n\nThe collector uses the wire, w1_gpio, and w1_therm kernel modules. Currently temperature sensors are supported and automatically detected.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will try to auto detect available 1-Wire devices.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **w1sensor** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **w1sensor**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/w1sensor.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Required Linux kernel modules\n\nMake sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded.\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 1 | no |\n| sensors_path | Directory path containing sensor folders with w1_slave files. | /sys/bus/w1/devices | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **w1sensor** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. 
Select the node **where you want the w1sensor data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _w1sensor_ (or scroll the list) to locate the **w1sensor** collector.\n5. Click the **+** next to the **w1sensor** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/w1sensor.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/w1sensor.conf\n```\n\n##### Examples\n\n###### Custom sensor device path\n\nMonitors a virtual sensor when the w1_slave file is located in a custom directory instead of the default location.\n\n```yaml\njobs:\n  - name: custom_sensors_path\n    sensors_path: /custom/path/devices\n\n```\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `w1sensor` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m w1sensor\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m w1sensor -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `w1sensor` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep w1sensor\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep w1sensor /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep w1sensor\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor\n\nThese metrics refer to the 1-Wire Sensor.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| w1sensor.temperature | temperature | Celsius |\n\n",integration_type:"collector",id:"go.d.plugin-w1sensor-1-Wire_Sensors",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/w1sensor/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-web_log",plugin_name:"go.d.plugin",module_name:"web_log",monitored_instance:{name:"Web server log files",link:"",categories:["data-collection.web-servers-and-proxies"],icon_filename:"webservers.svg"},keywords:["webserver","apache","httpd","nginx","lighttpd","logs"],info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[]}}},overview:"# Web server log files\n\nPlugin: go.d.plugin\nModule: web_log\n\n## Overview\n\nThis collector monitors web servers by parsing their log files.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects log files of web servers running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **web_log** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **web_log**, then click **+** to add a job. 
|\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/web_log.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nWeblog is aware of how to parse and interpret the following fields (**known fields**):\n\n> [nginx](https://nginx.org/en/docs/varindex.html)\n>\n> [apache](https://httpd.apache.org/docs/current/mod/mod_log_config.html)\n\n| nginx                   | apache   | description                                                                              |\n|-------------------------|----------|------------------------------------------------------------------------------------------|\n| $host ($http_host)      | %v       | Name of the server which accepted a request.                                             |\n| $server_port            | %p       | Port of the server which accepted a request.                                             |\n| $scheme                 | -        | Request scheme. "http" or "https".                                                       |\n| $remote_addr            | %a (%h)  | Client address.                                                                          |\n| $request                | %r       | Full original request line. The line is "$request_method $request_uri $server_protocol". |\n| $request_method         | %m       | Request method. Usually "GET" or "POST".                                                 |\n| $request_uri            | %U       | Full original request URI.                                                               |\n| $server_protocol        | %H       | Request protocol. Usually "HTTP/1.0", "HTTP/1.1", or "HTTP/2.0".                         |\n| $status                 | %s (%>s) | Response status code.                                                                    |\n| $request_length         | %I       | Bytes received from a client, including request and headers.                             |\n| $bytes_sent             | %O       | Bytes sent to a client, including request and headers.                                   |\n| $body_bytes_sent        | %B (%b)  | Bytes sent to a client, not counting the response header.                                |\n| $request_time           | %D       | Request processing time.                                                                 |\n| $upstream_response_time | -        | Time spent on receiving the response from the upstream server.                           |\n| $ssl_protocol           | -        | Protocol of an established SSL connection.                                               |\n| $ssl_cipher             | -        | String of ciphers used for an established SSL connection.                                |\n\nNotes:\n\n- Apache `%h` logs the IP address if [HostnameLookups](https://httpd.apache.org/docs/2.4/mod/core.html#hostnamelookups) is Off. The web log collector counts hostnames as IPv4 addresses. We recommend either to disable HostnameLookups or use `%a` instead of `%h`.\n- Since httpd 2.0, unlike 1.3, the `%b` and `%B` format strings do not represent the number of bytes sent to the client, but simply the size in bytes of the HTTP response. It will differ, for instance, if the connection is aborted, or if SSL is used. 
The `%O` format provided by [`mod_logio`](https://httpd.apache.org/docs/2.4/mod/mod_logio.html) will log the actual number of bytes sent over the network.\n- To get `%I` and `%O` working, you need to enable `mod_logio` on Apache.\n- NGINX logs the URI with query parameters, Apache doesn\'t.\n- `$request` is parsed into `$request_method`, `$request_uri` and `$server_protocol`. If you have `$request` in your log format, there is no need to include the others.\n- Don\'t use both `$bytes_sent` and `$body_bytes_sent` (`%O` and `%B` or `%b`). The module does not distinguish between these parameters.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection frequency. | 1 | no |\n|  | autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| **Target** | path | Path to the web server log file. |  | yes |\n|  | exclude_path | Path to exclude. | *.gz | no |\n| **Customization** | [url_patterns](#option-customization-url-patterns) | List of URL patterns. | [] | no |\n|  | url_patterns.name | Used as a dimension name. |  | yes |\n|  | url_patterns.pattern | Used to match against full original request URI. Pattern syntax in [matcher](https://github.com/netdata/netdata/tree/master/src/go/pkg/matcher#supported-format). |  | yes |\n| **Parser** | [log_type](#option-parser-log-type) | Log parser type. | auto | no |\n|  | csv_config | CSV log parser config. |  | no |\n|  | csv_config.delimiter | CSV field delimiter. | , | no |\n|  | csv_config.format | CSV log format. |  | no |\n|  | ltsv_config | LTSV log parser config. |  | no |\n|  | ltsv_config.field_delimiter | LTSV field delimiter. | \\t | no |\n|  | ltsv_config.value_delimiter | LTSV value delimiter. | : | no |\n|  | [ltsv_config.mapping](#option-parser-ltsv-config-mapping) | LTSV fields mapping to **known fields**. |  | yes |\n|  | json_config | JSON log parser config. |  | no |\n|  | [json_config.mapping](#option-parser-json-config-mapping) | JSON fields mapping to **known fields**. |  | yes |\n|  | regexp_config | RegExp log parser config. |  | no |\n|  | [regexp_config.pattern](#option-parser-regexp-config-pattern) | RegExp pattern with named groups. |  | yes |\n\n<a id="option-customization-url-patterns"></a>\n##### url_patterns\n\n"URL pattern" scope metrics will be collected for each URL pattern. \n\nOption syntax:\n\n```yaml\nurl_patterns:\n  - name: name1\n    pattern: pattern1\n  - name: name2\n    pattern: pattern2\n```\n\n\n<a id="option-parser-log-type"></a>\n##### log_type\n\nWeblog supports 5 different log parsers:\n\n| Parser type | Description                               |\n|-------------|-------------------------------------------|\n| auto        | Use CSV and auto-detect format            |\n| csv         | Comma-separated values                    |\n| json        | [JSON](https://www.json.org/json-en.html) |\n| ltsv        | [LTSV](http://ltsv.org/)                  |\n| regexp      | Regular expression with named groups      |\n\nSyntax:\n\n```yaml\nlog_type: auto\n```\n\nIf the `log_type` parameter is set to `auto` (the default), weblog tries to auto-detect the appropriate log parser and log format using the last line of the log file:\n\n- checks if the format is `CSV` (using a regexp).\n- checks if the format is `JSON` (using a regexp).\n- assumes the format is `CSV` and tries to find an appropriate `CSV` log format using a predefined list of formats. 
It tries to parse the line using each of them in the following order (the first one that matches is used):\n\n  ```sh\n  $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n  $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time\n  $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent     $request_length $request_time $upstream_response_time\n  $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent     $request_length $request_time\n  $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent\n  $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n  $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time\n  $remote_addr - - [$time_local] "$request" $status $body_bytes_sent     $request_length $request_time $upstream_response_time\n  $remote_addr - - [$time_local] "$request" $status $body_bytes_sent     $request_length $request_time\n  $remote_addr - - [$time_local] "$request" $status $body_bytes_sent\n  ```\n\n  If you\'re using the default Apache/NGINX log format, auto-detect will work for you. If it doesn\'t work, you need to set the format manually.\n\n\n<a id="option-parser-ltsv-config-mapping"></a>\n##### ltsv_config.mapping\n\nThe mapping is a dictionary where the key is a field name, as it appears in the logs, and the value is the corresponding **known field**.\n\n> **Note**: don\'t use `$` and `%` prefixes for mapped field names.\n\n```yaml\nlog_type: ltsv\nltsv_config:\n  mapping:\n    label1: field1\n    label2: field2\n```\n\n\n<a id="option-parser-json-config-mapping"></a>\n##### json_config.mapping\n\nThe mapping is a dictionary where the key is a field name, as it appears in the logs, and the value is the corresponding **known field**.\n\n> **Note**: don\'t use `$` and `%` prefixes for mapped field names.\n\n```yaml\nlog_type: json\njson_config:\n  mapping:\n    label1: field1\n    label2: field2\n```\n\n\n<a id="option-parser-regexp-config-pattern"></a>\n##### regexp_config.pattern\n\nUse a pattern with named subexpressions. These names should be **known fields**.\n\n> **Note**: don\'t use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nlog_type: regexp\nregexp_config:\n  pattern: PATTERN\n```\n\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **web_log** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the web_log data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _web_log_ (or scroll the list) to locate the **web_log** collector.\n5. Click the **+** next to the **web_log** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/web_log.conf`.\n\nThe file format is YAML. 
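\n\nAs an illustration of the options above, a hypothetical job for an NGINX access log might look like this (a sketch; the log path and the URL patterns, which use the matcher regexp form, are assumptions to adapt):\n\n```yaml\njobs:\n  - name: nginx\n    path: /var/log/nginx/access.log   # assumed log location\n    url_patterns:\n      - name: api\n        pattern: ~ ^/api                # assumed: requests under /api\n      - name: static\n        pattern: ~ ^/static             # assumed: requests under /static\n```\n\n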
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/web_log.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `web_log` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m web_log\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m web_log -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `web_log` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep web_log\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep web_log /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `web_log` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m web_log\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m web_log -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `web_log` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep web_log\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep web_log /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep web_log\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ web_log_1m_unmatched ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.excluded_requests | percentage of unparsed log lines over the last minute |\n| [ web_log_1m_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last minute (1xx, 2xx, 304, 401) |\n| [ web_log_1m_redirects ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of redirection HTTP requests over the last minute (3xx except 304) |\n| [ web_log_1m_bad_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of client error HTTP requests over the last minute (4xx except 401) |\n| [ web_log_1m_internal_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of server error HTTP requests over the last minute (5xx) |\n| [ web_log_web_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.request_processing_time | average HTTP response time over the last 1 minute |\n| [ web_log_5m_requests_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last 5 minutes, compared with the previous 5 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Web server log files instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.requests | requests | requests/s |\n| web_log.excluded_requests | unmatched | requests/s |\n| web_log.type_requests | success, bad, redirect, error | requests/s |\n| web_log.status_code_class_responses | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| web_log.status_code_class_1xx_responses | a dimension per 1xx code | responses/s |\n| web_log.status_code_class_2xx_responses | a dimension per 2xx code | responses/s |\n| web_log.status_code_class_3xx_responses | a dimension per 3xx code | responses/s |\n| web_log.status_code_class_4xx_responses | a dimension per 4xx code | responses/s |\n| web_log.status_code_class_5xx_responses | a dimension per 5xx code | responses/s |\n| web_log.bandwidth | received, sent | kilobits/s |\n| web_log.request_processing_time | min, max, avg | milliseconds |\n| web_log.requests_processing_time_histogram | a dimension per bucket | requests/s |\n| web_log.upstream_response_time | min, max, avg | milliseconds |\n| web_log.upstream_responses_time_histogram | a dimension per bucket | requests/s |\n| web_log.current_poll_uniq_clients | ipv4, ipv6 | clients |\n| web_log.vhost_requests | a dimension per vhost | requests/s |\n| web_log.port_requests | a dimension per port | requests/s |\n| web_log.scheme_requests | http, https | requests/s |\n| web_log.http_method_requests | a dimension per HTTP method | requests/s |\n| web_log.http_version_requests | a dimension per HTTP version | requests/s |\n| web_log.ip_proto_requests | ipv4, ipv6 | requests/s |\n| web_log.ssl_proto_requests | a dimension per SSL protocol | requests/s |\n| web_log.ssl_cipher_suite_requests | a dimension per SSL cipher suite | requests/s |\n| web_log.url_pattern_requests | a dimension per URL pattern | requests/s |\n| web_log.custom_field_pattern_requests | a dimension per custom field pattern | requests/s |\n\n### Per custom time field\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.custom_time_field_summary | min, max, avg | milliseconds |\n| web_log.custom_time_field_histogram | a dimension per bucket | observations |\n\n### Per custom numeric field\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.custom_numeric_field_{{field_name}}_summary | min, max, avg | {{units}} |\n\n### Per URL pattern\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.url_pattern_status_code_responses | a dimension per pattern | responses/s |\n| web_log.url_pattern_http_method_requests | a dimension per HTTP method | requests/s |\n| web_log.url_pattern_bandwidth | received, sent | kilobits/s |\n| web_log.url_pattern_request_processing_time | min, max, avg | milliseconds |\n\n",integration_type:"collector",id:"go.d.plugin-web_log-Web_server_log_files",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/weblog/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-whoisquery",plugin_name:"go.d.plugin",module_name:"whoisquery",monitored_instance:{name:"Domain expiration 
date",link:"",icon_filename:"globe.svg",categories:["data-collection.synthetic-testing"]},keywords:["whois"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Domain expiration date\n\nPlugin: go.d.plugin\nModule: whoisquery\n\n## Overview\n\nThis collector monitors the remaining time before the domain expires.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **whoisquery** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **whoisquery**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/whoisquery.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection frequency. | 60 | no |\n|  | autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| **Target** | source | Domain address. |  | yes |\n|  | timeout | The query timeout in seconds. | 5 | no |\n| **Customization** | days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n|  | days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **whoisquery** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the whoisquery data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. 
In the Search box, type _whoisquery_ (or scroll the list) to locate the **whoisquery** collector.\n5. Click the **+** next to the **whoisquery** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/whoisquery.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/whoisquery.conf\n```\n\n##### Examples\n\n###### Basic\n\nBasic configuration example.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: my_site\n    source: my_site.com\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of multiple domains.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: my_site1\n    source: my_site1.com\n\n  - name: my_site2\n    source: my_site2.com\n\n```\n{% /details %}\n
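###### Custom thresholds\n\nA hypothetical job that overrides the default warning/critical day thresholds (the option names come from the table above).\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: my_site\n    source: my_site.com\n    days_until_expiration_warning: 60\n    days_until_expiration_critical: 30\n\n```\n{% /details %}\n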
',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `whoisquery` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m whoisquery\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m whoisquery -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `whoisquery` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep whoisquery\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep whoisquery /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep whoisquery\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ whoisquery_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/whoisquery.conf) | whoisquery.time_until_expiration | time until the domain name registration expires |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per domain\n\nThese metrics refer to the configured source.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| domain | Configured source |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| whoisquery.time_until_expiration | expiry | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-whoisquery-Domain_expiration_date",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/whoisquery/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-wireguard",plugin_name:"go.d.plugin",module_name:"wireguard",monitored_instance:{name:"WireGuard",link:"https://www.wireguard.com/",categories:["data-collection.networking"],icon_filename:"wireguard.svg"},keywords:["wireguard","vpn","security"],info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[]}}},overview:"# WireGuard\n\nPlugin: go.d.plugin\nModule: wireguard\n\n## Overview\n\nThis collector monitors WireGuard VPN device and peer traffic.\n\n\nIt connects to the local WireGuard instance using the [wireguard-go client](https://github.com/WireGuard/wireguard-go).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis collector requires the CAP_NET_ADMIN capability, but it is set automatically during installation, so no manual configuration is needed.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects instances running on localhost.\n\n\n#### Limits\n\nDoesn't work if Netdata or WireGuard is installed in a container.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **wireguard** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                  
                                                          |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **wireguard**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/wireguard.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **wireguard** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the wireguard data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _wireguard_ (or scroll the list) to locate the **wireguard** collector.\n5. Click the **+** next to the **wireguard** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/wireguard.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/wireguard.conf\n```\n\n##### Examples\nThere are no configuration examples.\n
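\nNo job options are required for this collector, but as a hypothetical illustration, a job overriding the global collection frequency (the `update_every` option from the table above; the job name is arbitrary) would look like:\n\n```yaml\njobs:\n  - name: wireguard\n    update_every: 5\n```\n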
\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `wireguard` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m wireguard\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m wireguard -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `wireguard` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep wireguard\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep wireguard /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep wireguard\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per device\n\nThese metrics refer to the VPN network interface.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| device | VPN network interface |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireguard.device_network_io | receive, transmit | B/s |\n| wireguard.device_peers | peers | peers |\n\n### Per peer\n\nThese metrics refer to the VPN peer.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| device | VPN network interface |\n| public_key | Public key of a peer |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireguard.peer_network_io | receive, transmit | B/s |\n| wireguard.peer_latest_handshake_ago | time | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-wireguard-WireGuard",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/wireguard/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-x509check",plugin_name:"go.d.plugin",module_name:"x509check",monitored_instance:{name:"X.509 certificate",link:"",categories:["data-collection.synthetic-testing"],icon_filename:"lock.svg"},keywords:["x509","certificate"],info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[]}}},overview:"# X.509 certificate\n\nPlugin: go.d.plugin\nModule: x509check\n\n## Overview\n\n\n\nThis collector monitors X.509 certificates' expiration time and revocation status.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **x509check** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **x509check**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/x509check.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | source | Certificate source. Allowed schemes: https, tcp, tcp4, tcp6, udp, udp4, udp6, file, smtp. |  | no |\n|  | timeout | SSL connection timeout (seconds). | 2 | no |\n| **Validation** | check_full_chain | Monitor expiration time for all certificates in the chain (including intermediates and root). | no | no |\n|  | check_revocation_status | Check the revocation status of the certificate. | no | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **x509check** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the x509check data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _x509check_ (or scroll the list) to locate the **x509check** collector.\n5. Click the **+** next to the **x509check** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/x509check.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/x509check.conf\n```\n\n##### Examples\n\n###### Website certificate\n\nWebsite certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: my_site_cert\n    source: https://my_site.org:443\n\n```\n{% /details %}\n###### Local file certificate\n\nLocal file certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: my_file_cert\n    source: file:///home/me/cert.pem\n\n```\n{% /details %}\n###### SMTP certificate\n\nSMTP certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: my_smtp_cert\n    source: smtp://smtp.my_mail.org:587\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of multiple websites\' certificates.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: my_site_cert1\n    source: https://my_site1.org:443\n\n  - name: my_site_cert2\n    source: https://my_site2.org:443\n\n  - name: my_site_cert3\n    source: https://my_site3.org:443\n\n```\n{% /details %}\n
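###### Check revocation status\n\nA hypothetical job that also checks the revocation status of the certificate (the `check_revocation_status` option comes from the table above).\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: my_site_cert\n    source: https://my_site.org:443\n    check_revocation_status: yes\n\n```\n{% /details %}\n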
',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `x509check` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m x509check\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m x509check -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `x509check` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep x509check\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep x509check /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep x509check\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ x509check_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.time_until_expiration | SSL cert expiring soon (${label:source} cn:${label:common_name}) |\n| [ x509check_revocation_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.revocation_status | SSL cert revoked (${label:source}) |\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per source\n\nThese metrics refer to the SSL certificate.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| source | Same as the "source" configuration option. |\n| common_name | The common name (CN) extracted from the certificate. |\n| depth | The depth of the certificate within the certificate chain. The leaf certificate has a depth of 0, and subsequent certificates (intermediate certificates) have increasing depth values. The root certificate is at the highest depth. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| x509check.time_until_expiration | expiry | seconds |\n| x509check.revocation_status | not_revoked, revoked | boolean |\n\n',integration_type:"collector",id:"go.d.plugin-x509check-X.509_certificate",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/x509check/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-yugabytedb",plugin_name:"go.d.plugin",module_name:"yugabytedb",monitored_instance:{name:"YugabyteDB",link:"https://www.yugabyte.com/yugabytedb",categories:["data-collection.databases"],icon_filename:"yugabytedb.svg"},related_resources:{integrations:{list:[]}},alternative_monitored_instances:[],info_provided_to_referring_integrations:{description:""},keywords:["db","database","yb","yugabyte"]},overview:"# YugabyteDB\n\nPlugin: go.d.plugin\nModule: yugabytedb\n\n## Overview\n\nThis collector monitors the activity and performance of YugabyteDB servers.\n\n\nIt sends HTTP requests to the YugabyteDB [metric endpoints](https://docs.yugabyte.com/preview/launch-and-manage/monitor-and-alert/metrics/#metric-endpoints).\n\nIt also provides `top-queries` and `running-queries` functions using `pg_stat_statements` and `pg_stat_activity` from YSQL.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe `top-queries` function requires the `pg_stat_statements` extension to be installed in the target database.\n\nViewing all running queries via `pg_stat_activity` may require elevated privileges (e.g., `pg_read_all_stats`).\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects YugabyteDB instances running on localhost.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:7000/prometheus-metrics (Master)\n- http://127.0.0.1:9000/prometheus-metrics (Tablet Server)\n- http://127.0.0.1:12000/prometheus-metrics (YCQL)\n- 
http://127.0.0.1:13000/prometheus-metrics (YSQL)\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **yugabytedb** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **yugabytedb**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/yugabytedb.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 5 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | url | Target endpoint URL. | http://127.0.0.1:7000/prometheus-metrics | yes |\n|  | timeout | HTTP request timeout (seconds). | 1 | no |\n| **HTTP Auth** | username | Username for Basic HTTP authentication. |  | no |\n|  | password | Password for Basic HTTP authentication. |  | no |\n|  | bearer_token_file | Path to a file containing a bearer token (used for `Authorization: Bearer`). |  | no |\n| **TLS** | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Proxy** | proxy_url | HTTP proxy URL. |  | no |\n|  | proxy_username | Username for proxy Basic HTTP authentication. |  | no |\n|  | proxy_password | Password for proxy Basic HTTP authentication. |  | no |\n| **Request** | method | HTTP method to use. | GET | no |\n|  | body | Request body (e.g., for POST/PUT). |  | no |\n|  | headers | Additional HTTP headers (one per line as key: value). |  | no |\n|  | not_follow_redirects | Do not follow HTTP redirects. | no | no |\n|  | force_http2 | Force HTTP/2 (including h2c over TCP). | no | no |\n| **Functions** | functions.dsn | SQL DSN (required for query functions). |  | no |\n|  | functions.top_queries.disabled | Disable the [top-queries](#top-queries) function. | no | no |\n|  | functions.top_queries.timeout | Query timeout (seconds). 
Uses collector timeout if not set. |  | no |\n|  | functions.top_queries.limit | Maximum number of queries to return. | 500 | no |\n|  | functions.running_queries.disabled | Disable the [running-queries](#running-queries) function. | no | no |\n|  | functions.running_queries.timeout | Query timeout (seconds). Uses collector timeout if not set. |  | no |\n|  | functions.running_queries.limit | Maximum number of queries to return. | 500 | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **yugabytedb** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the yugabytedb data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _yugabytedb_ (or scroll the list) to locate the **yugabytedb** collector.\n5. Click the **+** next to the **yugabytedb** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/yugabytedb.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/yugabytedb.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:7000/prometheus-metrics     # Master\n    # url: http://127.0.0.1:9000/prometheus-metrics   # Tablet Server\n    # url: http://127.0.0.1:12000/prometheus-metrics  # YCQL\n    # url: http://127.0.0.1:13000/prometheus-metrics  # YSQL\n\n```\n###### Top queries\n\nEnable SQL query functions (YSQL).\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:7000/prometheus-metrics\n    functions:\n      dsn: postgres://yugabyte@127.0.0.1:5433/yugabyte?sslmode=disable\n\n```\n{% /details %}\n###### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:7000/prometheus-metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n###### HTTPS with self-signed certificate\n\nYugabyteDB with HTTPS enabled and a self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:7000/prometheus-metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their 
names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:7000/prometheus-metrics\n\n  - name: remote\n    url: http://192.0.2.1:7000/prometheus-metrics\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `yugabytedb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m yugabytedb\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m yugabytedb -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `yugabytedb` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep yugabytedb\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep yugabytedb /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep yugabytedb\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",functions:"## Functions\n\nThis collector exposes real-time functions for interactive troubleshooting in the Live tab.\n\n\n### Top Queries\n\nRetrieves aggregated query statistics from the PostgreSQL-compatible [pg_stat_statements](https://docs.yugabyte.com/preview/explore/query-1-performance/pg-stat-statements/) extension in YSQL.\n\nThis function queries the `pg_stat_statements` view which tracks execution statistics for all SQL statements executed on the YSQL layer. 
It provides timing metrics, execution counts, and row statistics for each unique query pattern.\n\nUse cases:\n- Identify slow queries consuming excessive total execution time\n- Find high-frequency queries that may benefit from optimization\n- Analyze query patterns by database and user\n\nQuery text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Yugabytedb:top-queries` |\n| Require Cloud | yes |\n| Performance | Queries the `pg_stat_statements` view via YSQL connection:<br/>\u2022 Default limit of 500 rows balances completeness with performance<br/>\u2022 Use `sql_timeout` to prevent long-running queries |\n| Security | Query text may contain unmasked literal values including potentially sensitive data:<br/>\u2022 Personal information in WHERE clauses or INSERT values<br/>\u2022 Business data embedded in queries<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to YSQL<br/>\u2022 The `pg_stat_statements` extension is installed<br/>\u2022 Returns HTTP 503 if the SQL connection cannot be established or extension is not installed<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Enable pg_stat_statements extension\n\nThe `pg_stat_statements` extension must be installed in the target YSQL database.\n\n1. Install the extension:\n\n   ```sql\n   CREATE EXTENSION IF NOT EXISTS pg_stat_statements;\n   ```\n\n2. Verify access:\n\n   ```sql\n   SELECT * FROM pg_stat_statements LIMIT 1;\n   ```\n\n:::info\n\n- The extension tracks statistics for all SQL statements executed\n- Statistics can be reset with `SELECT pg_stat_statements_reset()`\n- YugabyteDB uses PostgreSQL-compatible extensions\n\n:::\n\n\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. Options include total time, mean time, max time, calls, and rows. Defaults to total time to focus on most resource-intensive queries. | yes | totalTime |  |\n\n#### Returns\n\nAggregated query statistics from `pg_stat_statements`. Each row represents a unique query pattern with cumulative metrics across all executions.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| Query ID | string |  | hidden | Internal hash identifier for the normalized query pattern. |\n| Query | string |  |  | Normalized SQL query text with literals replaced by parameter placeholders. Truncated to 4096 characters. |\n| Database | string |  |  | Database name where the query was executed. Useful for multi-database workload analysis. |\n| User | string |  |  | YSQL user who executed the query. Useful for identifying workload by user or application. |\n| Calls | integer |  |  | Total number of times this query pattern has been executed. High values indicate frequently run queries. |\n| Total Time | duration | milliseconds |  | Cumulative execution time across all calls. Primary metric for identifying resource-intensive queries. |\n| Mean Time | duration | milliseconds |  | Average execution time per call. Compare with total time to distinguish slow queries from frequently called ones. |\n| Min Time | duration | milliseconds | hidden | Minimum execution time observed for this query pattern. 
|\n| Max Time | duration | milliseconds | hidden | Maximum execution time observed. Large gaps between min and max may indicate parameter sensitivity or lock contention. |\n| Rows | integer |  |  | Total number of rows retrieved or affected by the query across all executions. |\n| Stddev Time | duration | milliseconds | hidden | Standard deviation of execution times. High values indicate inconsistent query performance. |\n\n### Running Queries\n\nRetrieves currently executing statements from the PostgreSQL-compatible [pg_stat_activity](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) view in YSQL.\n\nThis function queries `pg_stat_activity` to show all non-idle backend processes with their current query, state, and timing information. It excludes idle connections to focus on active workload.\n\nUse cases:\n- Identify long-running queries that may need investigation\n- Monitor active connections and their current state\n- Investigate blocked or waiting queries\n\nQuery text is truncated at 4096 characters for display purposes.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Yugabytedb:running-queries` |\n| Require Cloud | yes |\n| Performance | Queries the `pg_stat_activity` view via YSQL connection:<br/>\u2022 Returns only non-idle connections to reduce result size<br/>\u2022 Default limit of 500 rows balances completeness with performance<br/>\u2022 Use `sql_timeout` to prevent long-running queries |\n| Security | Query text may contain unmasked literal values including potentially sensitive data:<br/>\u2022 Personal information in WHERE clauses or INSERT values<br/>\u2022 Business data embedded in queries<br/>\u2022 Access should be restricted to authorized personnel only |\n| Availability | Available when:<br/>\u2022 The collector has successfully connected to YSQL<br/>\u2022 Returns HTTP 503 if the SQL connection cannot be established<br/>\u2022 Returns HTTP 500 if the query fails<br/>\u2022 Returns HTTP 504 if the query times out |\n\n#### Prerequisites\n\n##### Grant access to all queries (optional)\n\nBy default, users can only see their own queries in `pg_stat_activity`. To view all users' queries, grant the `pg_read_all_stats` role:\n\n```sql\nGRANT pg_read_all_stats TO your_user;\n```\n\n:::info\n\n- The `yugabyte` superuser can see all queries by default\n- Without elevated privileges, only the user's own queries are visible\n- Idle connections are filtered out from results\n\n:::\n\n\n\n#### Parameters\n\n| Parameter | Type | Description | Required | Default | Options |\n|:---------|:-----|:------------|:--------:|:--------|:--------|\n| Filter By | select | Select the primary sort column. Defaults to elapsed time to focus on longest-running queries. | yes | elapsedMs |  |\n\n#### Returns\n\nCurrently running SQL statements from `pg_stat_activity`. Each row represents an active backend process with its current query and execution context.\n\n| Column | Type | Unit | Visibility | Description |\n|:-------|:-----|:-----|:-----------|:------------|\n| PID | string |  | hidden | Backend process ID. Can be used with pg_terminate_backend() to cancel a query. |\n| Query | string |  |  | The SQL statement currently being executed. Truncated to 4096 characters. |\n| Database | string |  |  | Database name the backend is connected to. |\n| User | string |  |  | YSQL user name of the backend process. |\n| State | string |  |  | Current state of the backend (active, idle in transaction, fastpath function call, etc.). 
|\n| Wait Event Type | string |  | hidden | Type of event the backend is waiting for (Lock, LWLock, IO, etc.). Null if not waiting. |\n| Wait Event | string |  | hidden | Specific wait event name. Useful for diagnosing lock contention or I/O bottlenecks. |\n| Application | string |  | hidden | Application name set by the client connection. Useful for identifying which application is running the query. |\n| Client Address | string |  | hidden | IP address of the client connection. |\n| Query Start | string |  | hidden | Timestamp when the current query began execution. |\n| Elapsed | duration | milliseconds |  | Time elapsed since the query started. High values indicate long-running queries that may need investigation. |\n\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Master Client operations (Master)\n\nMetrics tracking latency and counts of Master Client RPC operations.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| operation | The specific MasterClient RPC operation name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| yugabytedb.master_client_operations | operations | ops/s |\n| yugabytedb.master_client_operations_latency | latency | microseconds |\n\n### Per Master DDL operations\n\nMetrics tracking Data Definition Language (DDL) operations performed on the Master server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| operation | The specific DDL operation handler name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| yugabytedb.master_ddl_operations | operations | ops/s |\n| yugabytedb.master_ddl_operations_latency | latency | microseconds |\n\n### Per TabletServerService (Master)\n\nMetrics tracking latency and counts of TabletServer RPC operations on the Master server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| op | The specific TabletServer RPC operation name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| yugabytedb.master_tabletserverservice_operations | operations | ops/s |\n| yugabytedb.master_tabletserverservice_operations_latency | latency | microseconds |\n| yugabytedb.master_tabletserverservice_traffic | received, sent | bytes/s |\n\n### Per PgClientService (Master)\n\nMetrics tracking latency and counts of PostgreSQL client service RPC operations on the Master server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| operation | The specific PgClientService RPC operation name that handles PostgreSQL protocol requests and responses. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| yugabytedb.master_pgclientservice_operations | operations | ops/s |\n| yugabytedb.master_pgclientservice_operations_latency | latency | microseconds |\n| yugabytedb.master_pgclientservice_traffic | received, sent | bytes/s |\n\n### Per RemoteBootstrapService (Master)\n\nMetrics tracking latency and counts of RemoteBootstrap RPC operations on the Master server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| operation | The specific RemoteBootstrap RPC operation name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| yugabytedb.master_remotebootstrapservice_operations | operations | ops/s |\n| yugabytedb.master_remotebootstrapservice_operations_latency | latency | microseconds |\n| yugabytedb.master_remotebootstrapservice_traffic | received, sent | bytes/s |\n\n### Per Raft operations (Master)\n\nMetrics tracking latency and counts of Raft consensus protocol operations on the Master server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| operation | The specific Raft protocol operation name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| yugabytedb.master_consensus_operations | operations | ops/s |\n| yugabytedb.master_consensus_operations_latency | latency | microseconds |\n| yugabytedb.master_consensus_traffic | received, sent | bytes/s |\n\n### Per TabletServerService (Tablet Server)\n\nMetrics tracking latency and counts of TabletServer RPC operations on the Tablet server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| op | The specific TabletServer RPC operation name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| yugabytedb.tserver_tabletserverservice_operations | operations | ops/s |\n| yugabytedb.tserver_tabletserverservice_operations_latency | latency | microseconds |\n| yugabytedb.tserver_tabletserverservice_traffic | received, sent | bytes/s |\n\n### Per TabletServerAdminService (Tablet Server)\n\nMetrics tracking latency and counts of TabletServerAdmin RPC operations on the Tablet server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| op | The specific TabletServerAdmin RPC operation name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| yugabytedb.tserver_tabletserveradminservice_operations | operations | ops/s |\n| yugabytedb.tserver_tabletserveradminservice_operations_latency | latency | microseconds |\n| yugabytedb.tserver_tabletserveradminservice_traffic | received, sent | bytes/s |\n\n### Per TabletServerBackupService (Tablet Server)\n\nMetrics tracking latency and counts of TabletServerBackup RPC operations on the Tablet server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| op | The specific TabletServerBackup RPC operation name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| yugabytedb.tserver_tabletserverbackupservice_operations | operations | ops/s |\n| yugabytedb.tserver_tabletserverbackupservice_operations_latency | latency | microseconds |\n| yugabytedb.tserver_tabletserverbackupservice_traffic | received, sent | bytes/s |\n\n### Per PgClientService (Tablet Server)\n\nMetrics tracking latency and counts of PostgreSQL client service RPC operations on the Tablet server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| operation | The specific PgClientService RPC operation name that handles PostgreSQL protocol requests and responses. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| yugabytedb.tserver_pgclientservice_operations | operations | ops/s |\n| yugabytedb.tserver_pgclientservice_operations_latency | latency | microseconds |\n| yugabytedb.tserver_pgclientservice_traffic | received, sent | bytes/s |\n\n### Per RemoteBootstrapService (Tablet Server)\n\nMetrics tracking latency and counts of RemoteBootstrap RPC operations on the Tablet server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| operation | The specific RemoteBootstrap RPC operation name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| yugabytedb.tserver_remotebootstrapservice_operations | operations | ops/s |\n| yugabytedb.tserver_remotebootstrapservice_operations_latency | latency | microseconds |\n| yugabytedb.tserver_remotebootstrapservice_traffic | received, sent | bytes/s |\n\n### Per Raft operations (Tablet Server)\n\nMetrics tracking latency and counts of Raft consensus protocol operations on the Tablet server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| operation | The specific Raft protocol operation name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| yugabytedb.tserver_consensus_operations | operations | ops/s |\n| yugabytedb.tserver_consensus_operations_latency | latency | microseconds |\n| yugabytedb.tserver_consensus_traffic | received, sent | bytes/s |\n\n### Per SQL Statements (YCQL)\n\nMetrics tracking latency and counts of SQL statements on the YCQL server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| statement | The specific SQL statement name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| yugabytedb.ycql_sql_statements | statements | statements/s |\n| yugabytedb.ycql_sql_statements_latency | latency | microseconds |\n\n### Per YSQL server\n\nMetrics tracking connections on the YSQL server.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| yugabytedb.ysql_connection_usage | available, used | connections |\n| yugabytedb.ysql_active_connections | active | connections |\n| yugabytedb.ysql_established_connections | established | connections/s |\n| yugabytedb.ysql_over_limit_connections | over_limit | rejects/s |\n\n### Per SQL Statements (YSQL)\n\nMetrics tracking latency and counts of SQL statements on the YSQL server.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| statement | The specific SQL statement name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| yugabytedb.ysql_sql_statements | statements | statements/s |\n| yugabytedb.ysql_sql_statements_latency | latency | microseconds |\n\n",integration_type:"collector",id:"go.d.plugin-yugabytedb-YugabyteDB",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/yugabytedb/metadata.yaml",related_resources:""},{meta:{id:"collector-go.d.plugin-zfspool",plugin_name:"go.d.plugin",module_name:"zfspool",monitored_instance:{name:"ZFS Pools",link:"",icon_filename:"filesystem.svg",categories:["data-collection.storage"]},keywords:["zfs pools","pools","zfs","filesystem"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# ZFS Pools\n\nPlugin: go.d.plugin\nModule: zfspool\n\n## Overview\n\nThis collector monitors the health and space usage of ZFS pools using the command line tool [zpool](https://openzfs.github.io/openzfs-docs/man/master/8/zpool-list.8.html).\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n- BSD\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\nYou can configure the **zfspool** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **zfspool**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/zfspool.conf` and add a job.                                                                        |\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `zpool` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/bin/zpool | yes |\n| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **zfspool** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. 
Select the node **where you want the zfspool data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _zfspool_ (or scroll the list) to locate the **zfspool** collector.\n5. Click the **+** next to the **zfspool** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/zfspool.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/zfspool.conf\n```\n\n##### Examples\n\n###### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: zfspool\n    binary_path: /usr/local/sbin/zpool\n\n```\n{% /details %}\n
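###### Longer timeout\n\nA sketch for hosts where `zpool` responds slowly; it raises the execution timeout using the `timeout` option from the table above (the value shown is illustrative).\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: zfspool\n    binary_path: /usr/bin/zpool\n    timeout: 5\n\n```\n{% /details %}\n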
',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `zfspool` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m zfspool\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m zfspool -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `zfspool` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep zfspool\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep zfspool /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep zfspool\n```\n\n",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_pool_space_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.pool_space_utilization | ZFS pool ${label:pool} is nearing capacity. Current space usage is above the threshold. |\n| [ zfs_pool_health_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.pool_health_state | ZFS pool ${label:pool} state is degraded |\n| [ zfs_pool_health_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.pool_health_state | ZFS pool ${label:pool} state is faulted or unavail |\n| [ zfs_vdev_health_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.vdev_health_state | ZFS vdev ${label:vdev} state is faulted or degraded |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per zfs pool\n\nThese metrics refer to the ZFS pool.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| pool | Zpool name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfspool.pool_space_utilization | utilization | % |\n| zfspool.pool_space_usage | free, used | bytes |\n| zfspool.pool_fragmentation | fragmentation | % |\n| zfspool.pool_health_state | online, degraded, faulted, offline, unavail, removed, suspended | state |\n\n### Per zfs pool vdev\n\nThese metrics refer to the ZFS pool virtual device.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| pool | Zpool name |\n| vdev | Unique identifier for a virtual device (vdev) within a ZFS pool. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfspool.vdev_health_state | online, degraded, faulted, offline, unavail, removed, suspended | state |\n\n",integration_type:"collector",id:"go.d.plugin-zfspool-ZFS_Pools",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/zfspool/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-go.d.plugin-zookeeper",plugin_name:"go.d.plugin",module_name:"zookeeper",monitored_instance:{name:"ZooKeeper",link:"https://zookeeper.apache.org/",categories:["data-collection.applications"],icon_filename:"zookeeper.svg"},keywords:["zookeeper"],info_provided_to_referring_integrations:{description:""},related_resources:{integrations:{list:[{plugin_name:"apps.plugin",module_name:"apps"}]}}},overview:'# ZooKeeper\n\nPlugin: go.d.plugin\nModule: zookeeper\n\n## Overview\n\n\n\nIt connects to the ZooKeeper instance over TCP and executes the following commands:\n\n- [mntr](https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html#sc_zkCommands).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nZooKeeper can be monitored further using the following other integrations:\n\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect using known ZooKeeper TCP sockets:\n\n- 127.0.0.1:2181\n- 127.0.0.1:2182\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\nYou can configure the **zookeeper** collector in two ways:\n\n| Method                | Best for                                                                                 | How to                                                                                                                                 |\n|-----------------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|\n| [**UI**](#via-ui)     | Fast setup without editing files                                                         | Go to **Nodes \u2192 Configure this node \u2192 Collectors \u2192 Jobs**, search for **zookeeper**, then click **+** to add a job. |\n| [**File**](#via-file) | If you prefer configuring via file, or need to automate deployments (e.g., with Ansible) | Edit `go.d/zookeeper.conf` and add a job.                                                                        
|\n\n:::important\n\nUI configuration requires a paid Netdata Cloud plan.\n\n:::\n\n\n### Prerequisites\n\n#### Whitelist `mntr` command\n\nAdd `mntr` to ZooKeeper\'s [4lw.commands.whitelist](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_4lw).\n
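\nYou can verify the whitelist from the Netdata host (a quick check, assuming `nc` from the netcat package is available and ZooKeeper listens on the default port); a whitelisted `mntr` command returns a list of `zk_*` statistics:\n\n```bash\necho mntr | nc 127.0.0.1 2181\n```\n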
\n\n### Configuration\n\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **Collection** | update_every | Data collection interval (seconds). | 1 | no |\n|  | autodetection_retry | Autodetection retry interval (seconds). Set 0 to disable. | 0 | no |\n| **Target** | address | ZooKeeper server address (`IP:PORT`). | 127.0.0.1:2181 | yes |\n|  | timeout | Connection, read, write, and TLS handshake timeout (seconds). | 1 | no |\n| **TLS** | use_tls | Enable TLS for the connection. | no | no |\n|  | tls_skip_verify | Skip TLS certificate and hostname verification (insecure). | no | no |\n|  | tls_ca | Path to CA bundle used to validate the server certificate. |  | no |\n|  | tls_cert | Path to client TLS certificate (for mTLS). |  | no |\n|  | tls_key | Path to client TLS private key (for mTLS). |  | no |\n| **Virtual Node** | vnode | Associates this data collection job with a [Virtual Node](https://learn.netdata.cloud/docs/netdata-agent/configuration/organize-systems-metrics-and-alerts#virtual-nodes). |  | no |\n\n\n{% /details %}\n\n#### via UI\n\nConfigure the **zookeeper** collector from the Netdata web interface:\n\n1. Go to **Nodes**.\n2. Select the node **where you want the zookeeper data-collection job to run** and click the :gear: (**Configure this node**). That node will run the data collection.\n3. The **Collectors \u2192 Jobs** view opens by default.\n4. In the Search box, type _zookeeper_ (or scroll the list) to locate the **zookeeper** collector.\n5. Click the **+** next to the **zookeeper** collector to add a new job.\n6. Fill in the job fields, then click **Test** to verify the configuration and **Submit** to save.\n    - **Test** runs the job with the provided settings and shows whether data can be collected.\n    - If it fails, an error message appears with details (for example, connection refused, timeout, or command execution errors), so you can adjust and retest.\n\n\n#### via File\n\nThe configuration file name for this integration is `go.d/zookeeper.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\njobs:\n  - name: some_name1\n  - name: some_name2\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/zookeeper.conf\n```\n\n##### Examples\n\n###### Basic\n\nLocal server.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:2181\n\n```\n{% /details %}\n###### TLS with self-signed certificate\n\nZooKeeper with TLS and a self-signed certificate.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:2181\n    use_tls: yes\n    tls_skip_verify: yes\n\n```\n{% /details %}\n###### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    address: 127.0.0.1:2181\n\n  - name: remote\n    address: 192.0.2.1:2181\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `zookeeper` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m zookeeper\n  ```\n\n  To debug a specific job:\n\n  ```bash\n  ./go.d.plugin -d -m zookeeper -j jobName\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `zookeeper` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep zookeeper\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep zookeeper /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. 
Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep zookeeper\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ZooKeeper server\n\nThese metrics refer to ZooKeeper servers.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zookeeper.requests | outstanding | requests |\n| zookeeper.requests_latency | min, avg, max | ms |\n| zookeeper.stale_requests | stale | requests/s |\n| zookeeper.stale_requests_dropped | dropped | requests/s |\n| zookeeper.connections | alive | connections |\n| zookeeper.connections_dropped | dropped | connections/s |\n| zookeeper.connections_rejected | rejected | connections/s |\n| zookeeper.auth_fails | auth | fails/s |\n| zookeeper.global_sessions | global | sessions |\n| zookeeper.server_state | leader, follower, observer, standalone | state |\n| zookeeper.throttled_ops | throttled | ops/s |\n| zookeeper.packets | received, sent | pps |\n| zookeeper.file_descriptor | open | file descriptors |\n| zookeeper.nodes | znode, ephemerals | nodes |\n| zookeeper.watches | watches | watches |\n| zookeeper.approximate_data_size | size | KiB |\n| zookeeper.uptime | uptime | seconds |\n\n",integration_type:"collector",id:"go.d.plugin-zookeeper-ZooKeeper",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/collector/zookeeper/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"guides",module_name:"proxmox",monitored_instance:{name:"Proxmox VE Monitoring",link:"https://www.proxmox.com/",icon_filename:"proxmox.png",categories:["data-collection.containers-and-vms"]},keywords:["proxmox","proxmox ve","pve","kvm","qemu","lxc","virtualization","hypervisor","virtual machines","containers","ceph","zfs","corosync","cluster"],related_resources:{integrations:{list:[{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Proxmox VMs and Containers"},{plugin_name:"cgroups.plugin",module_name:"/sys/fs/cgroup",monitored_instance_name:"Systemd Services"},{plugin_name:"apps.plugin",module_name:"apps",monitored_instance_name:"Applications"},{plugin_name:"go.d.plugin",module_name:"zfspool"},{plugin_name:"go.d.plugin",module_name:"ceph"},{plugin_name:"go.d.plugin",module_name:"smartctl"},{plugin_name:"go.d.plugin",module_name:"sensors"},{plugin_name:"proc.plugin",module_name:"/proc/net/dev"},{plugin_name:"proc.plugin",module_name:"/proc/diskstats"},{plugin_name:"proc.plugin",module_name:"/proc/stat"},{plugin_name:"proc.plugin",module_name:"/proc/meminfo"},{plugin_name:"proc.plugin",module_name:"/proc/spl/kstat/zfs/arcstats"}]}},info_provided_to_referring_integrations:{description:""}},overview:'# Proxmox VE Monitoring\n\nPlugin: guides\nModule: proxmox\n\n## Overview\n\nThis guide describes how Netdata monitors Proxmox VE hypervisors. 
Netdata provides comprehensive, zero-configuration monitoring of Proxmox hosts, including per-VM and per-container resource utilization, host system metrics, storage health, and cluster components.\n\nWhen installed on a Proxmox host, Netdata automatically discovers and monitors all KVM/QEMU virtual machines and LXC containers through Linux cgroups, resolving friendly names for each VM and container.\n\n\nNetdata uses multiple collectors working together to provide full Proxmox visibility:\n\n- **cgroups.plugin** monitors per-VM and per-container CPU, memory, disk I/O, and network via Linux cgroups. It automatically resolves VM names from `/etc/pve/qemu-server/<VMID>.conf` and container hostnames from `/etc/pve/lxc/<CTID>.conf`.\n- **proc.plugin** monitors host-level system metrics (CPU, memory, network interfaces, disk I/O).\n- **apps.plugin** monitors Proxmox-specific process groups (`proxmox-ve`, `libvirt`, `qemu-guest-agent`).\n- **go.d/zfspool** monitors ZFS pool health, space utilization, and fragmentation (ZFS is common on Proxmox).\n- **go.d/ceph** monitors Ceph cluster health and performance (for Proxmox clusters using Ceph storage).\n- **go.d/smartctl** monitors physical disk SMART health data.\n- **go.d/sensors** monitors hardware temperature, fan speed, and voltage.\n- **ebpf.plugin** provides kernel-level visibility into VM/container syscalls, file I/O, and network activity.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\nProxmox VE Monitoring can be monitored further using the following other integrations:\n\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Proxmox_VMs_and_Containers" %}Proxmox VMs and Containers{% /relatedResource %}\n- {% relatedResource id="cgroups.plugin-/sys/fs/cgroup-Systemd_Services" %}Systemd Services{% /relatedResource %}\n- {% relatedResource id="apps.plugin-apps-Applications" %}Applications{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-zfspool-ZFS_Pools" %}ZFS Pools{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-ceph-Ceph" %}Ceph{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-smartctl-S.M.A.R.T." 
%}S.M.A.R.T.{% /relatedResource %}\n- {% relatedResource id="go.d.plugin-sensors-Linux_Sensors" %}Linux Sensors{% /relatedResource %}\n- {% relatedResource id="proc.plugin-/proc/net/dev-Network_interfaces" %}Network interfaces{% /relatedResource %}\n- {% relatedResource id="proc.plugin-/proc/diskstats-Disk_Statistics" %}Disk Statistics{% /relatedResource %}\n- {% relatedResource id="proc.plugin-/proc/stat-System_statistics" %}System statistics{% /relatedResource %}\n- {% relatedResource id="proc.plugin-/proc/meminfo-Memory_Usage" %}Memory Usage{% /relatedResource %}\n- {% relatedResource id="proc.plugin-/proc/spl/kstat/zfs/arcstats-ZFS_Adaptive_Replacement_Cache" %}ZFS Adaptive Replacement Cache{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen Netdata is installed on a Proxmox VE host, it automatically detects and monitors:\n\n- All running KVM/QEMU virtual machines\n- All running LXC containers\n- Host system resources (CPU, memory, network, disks)\n- Systemd services (pveproxy, pvedaemon, pvestatd, corosync, etc.)\n- ZFS pools (if ZFS is used)\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:"## Setup\n\n\n### Prerequisites\n\n#### Install Netdata on the Proxmox host\n\nNetdata must be installed directly on the Proxmox VE host (not inside a VM or container) to access cgroups for all VMs and containers.\n\n```bash\nwget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh\n```\n\n\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"## Troubleshooting\n\n### VM or container names not resolved\n\nIf VMs or containers show raw cgroup paths instead of friendly names, verify that:\n\n1. Netdata is installed on the Proxmox host (not inside a VM)\n2. The `/etc/pve/` directory is accessible to the netdata user\n3. The `cgroup-name.sh` script can read VM/container configuration files\n
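\nA quick way to check items 2 and 3 is to list the configuration directories as the netdata user (a sketch, assuming the default Proxmox paths mentioned in the Overview):\n\n```bash\nsudo -u netdata ls /etc/pve/qemu-server /etc/pve/lxc\n```\n\nIf this command fails with permission errors, name resolution will fail as well.\n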
\n\n### Missing ZFS metrics\n\nIf ZFS pool metrics are not showing, ensure the `zfspool` collector is enabled and the `zpool` command is available to the netdata user.\n\n\n### Missing Ceph metrics\n\nCeph metrics require the Ceph collector to be configured with the Ceph REST API endpoint. See the Ceph integration page for details.\n\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nThis guide does not collect metrics directly. Metrics are collected by the related integrations listed above. See each integration's page for detailed metric documentation.\n\n",integration_type:"collector",id:"guides-proxmox-Proxmox_VE_Monitoring",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/guides/proxmox/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ibm.d.plugin",module_name:"as400",monitored_instance:{name:"IBM i (AS/400)",link:"https://www.ibm.com/products/power-systems",categories:["data-collection.operating-systems"],icon_filename:"ibm-i.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["as400"]},overview:'# IBM i (AS/400)\n\nPlugin: ibm.d.plugin\nModule: as400\n\n## Overview\n\nMonitors IBM i (AS/400) systems using SQL services and CL commands to\nexpose CPU, memory, storage, job, and subsystem activity.\n\n**Dependencies:**\n- unixODBC 2.3+ with IBM i Access ODBC driver\n- IBM i 7.2 or later with SQL services enabled\n\n**Required Libraries:**\n- libodbc.so (provided by unixODBC)\n- IBM i Access Client Solutions\n\n**Collection paths:**\n\nThe collector executes queries in multiple tracks:\n\n- **Fast path (5s)**: lightweight system status queries remain sequential on the main plugin thread.\n- **Slow path (10s beat)**: heavier queries (per-queue metrics, subsystems, plan cache, etc.) run in a background worker with bounded concurrency.\n- **Batch path (\u226560s beat)**: optional long-period worker used for expensive aggregate queries such as queue totals. Disabled by default unless queue totals are explicitly enabled.\n\n**CPU Collection Methods:**\n\nThe collector uses a hybrid approach for CPU utilization metrics to handle IBM i 7.4+ where\n`AVERAGE_CPU_*` columns were deprecated:\n\n1. **Primary Method - TOTAL_CPU_TIME**: Uses the monotonic `TOTAL_CPU_TIME` counter from\n   `QSYS2.SYSTEM_STATUS()` to calculate CPU utilization via a delta-based calculation. This is\n   the most accurate method but requires `*JOBCTL` special authority. TOTAL_CPU_TIME is a\n   cumulative counter of CPU time consumed, in nanoseconds, naturally in per-core\n   scale.\n\n2. **Fallback Method - ELAPSED_CPU_USED**: If `*JOBCTL` authority is not available, falls back\n   to `ELAPSED_CPU_USED` with automatic reset detection. This method tracks when IBM i statistics\n   are reset (either manually or via `reset_statistics` configuration) and re-establishes a\n   baseline after detecting resets. The values are already in per-core scale.\n\n3. **Legacy Method - AVERAGE_CPU_UTILIZATION**: For IBM i versions before 7.4, uses the now-\n   deprecated `AVERAGE_CPU_UTILIZATION` column, which IBM reports in the same per-core scale.\n\nThe collector automatically selects the appropriate method based on available permissions and\nlogs which method is being used.\n\n**CPU Metric Scale:**\n\nCPU utilization is reported using the "100% = 1 CPU core" semantic. This means:\n- 100% indicates one CPU core is fully utilized\n- 400% indicates four CPU cores are fully utilized\n- Values are limited to 100% \xd7 ConfiguredCPUs, matching the partition\'s configured capacity\n\nFor shared LPARs, the metrics show absolute CPU consumption in per-core scale, not relative to\nentitled capacity. For example, a shared LPAR entitled to 0.20 cores can show 150% utilization\nwhen bursting above entitlement.\n\n**Statistics Reset Behavior:**\n\nThe `reset_statistics` configuration option controls whether the collector resets IBM i system\nstatistics on each query via `SYSTEM_STATUS(RESET_STATISTICS=>\'YES\')`. 
When enabled:\n\n- System-level statistics (CPU, memory pools, etc.) are reset after each collection cycle\n- Matches legacy behavior but clears global statistics that other tools may rely on\n- The ELAPSED_CPU_USED fallback method will detect and handle these resets automatically\n- **Caution**: Enabling this affects all users and applications on the IBM i system\n\nDefault: `false` (statistics are not reset, using `RESET_STATISTICS=>\'NO\'`)\n\n**Chart Gaps During Baseline Resets:**\n\nThe `as400.system_activity_cpu_rate` and `as400.system_activity_cpu_utilization` charts rely on\ndelta calculations. When the collector detects that IBM i reset these statistics\u2014or when it is\nstill establishing the initial baseline\u2014it intentionally skips a sample instead of emitting a zero\nor spike. Netdata renders those skipped samples as small gaps, which is expected behaviour.\n\n**Cardinality Management:**\n\nTo prevent performance issues from excessive metric creation, the collector enforces cardinality\nlimits on per-instance metrics (disks, subsystems, job queues, message queues, output queues,\nactive jobs, network interfaces, HTTP servers).\n\n**How Limits Work:**\n- The collector counts instances before collecting metrics\n- If count exceeds the configured `max_*` limit, **collection is skipped entirely** for that category\n- The collector logs a warning: `"[category] count (X) exceeds limit (Y), skipping collection"`\n- No metrics are collected for that category until you adjust the configuration\n\n**Configuration Options:**\n\nUse **both** limit and selector options together to manage high-cardinality environments:\n\n| Option | Purpose | Default |\n|--------|---------|---------|\n| `max_disks` | Maximum disk units to monitor | 100 |\n| `max_subsystems` | Maximum subsystems to monitor | 100 |\n| `max_job_queues` | Maximum job queues to monitor | 100 |\n| `max_message_queues` | Maximum message queues to monitor | 100 |\n| `max_output_queues` | Maximum output queues to monitor | 100 |\n| `active_jobs` | Fully qualified active jobs to monitor (`JOB_NUMBER/USER/JOB_NAME`) | `[]` |\n| `collect_disks_matching` | Glob pattern to filter disks (e.g., `"001* 002*"`) | `""` (match all) |\n| `collect_subsystems_matching` | Glob pattern to filter subsystems (e.g., `"QINTER QBATCH"`) | `""` (match all) |\n| `collect_job_queues_matching` | Glob pattern to filter job queues (e.g., `"QSYS/*"`) | `""` (match all) |\n\nOptional batch-path controls:\n\n| Option | Purpose | Default |\n|--------|---------|---------|\n| `batch_path` | Enables the long-period batch worker for aggregate queries | `false` |\n| `batch_path_update_every` | Batch worker cadence (minimum 60s, recommend \u2265600s in production) | `60s` |\n| `batch_path_max_connections` | Maximum concurrent connections for batch queries | `1` |\n| `collect_message_queue_totals` | Enables full-scan counting of all message queues and messages | `auto` (off) |\n| `collect_job_queue_totals` | Enables aggregate counting of job queues and queued jobs | `auto` (off) |\n| `collect_output_queue_totals` | Enables aggregate counting of output queues and spooled files | `auto` (off) |\n\n> **Warning:** queue totals require scanning IBM i catalog views and can be very expensive on large systems. Leave these options disabled unless aggregate counts are absolutely necessary.\n\n\n**Example Workflow:**\n\n1. System has 500 disks, collector skips disk metrics (exceeds default limit of 100)\n2. 
Check logs: `"disk count (500) exceeds limit (100), skipping per-disk metrics"`\n3. Two options:\n   - **Option A**: Increase limit: `max_disks: 500` (collects all 500 disks)\n   - **Option B**: Use selector: `collect_disks_matching: "00[1-5]*"` (cherry-pick specific disks)\n\n**Best Practices:**\n- Use selectors to monitor only business-critical objects in large environments\n- Set limits based on your Netdata server\'s capacity (each instance = multiple charts)\n- Start with defaults and adjust based on actual usage patterns\n\n**IBM i 7.2\u20137.3 Behavior Note (Message Queues):**\n\nIBM i 7.4 introduced a message-queue table function that returns only the live backlog. On\n7.2\u20137.3 systems we fall back to the `QSYS2.MESSAGE_QUEUE_INFO` view, which includes *all*\nrecorded messages (even those already processed/cleared from the queue). Aggregations\u2014especially\n`MAX(SEVERITY)`\u2014therefore reflect the historical log, not just the outstanding backlog. This\nbehaviour is inherent to the IBM SQL service and can lead to higher-than-expected max severity\nvalues on pre-7.4 systems.\n\nNetwork interface metrics have a fixed internal limit of 50 instances, and HTTP server metrics are capped at 200 instances; these limits are currently not configurable.\n\n\nThe collector connects to IBM i (AS/400) and collects metrics via its monitoring interface.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn\'t support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\n### Prerequisites\n\n#### Enable monitoring interface\n\nEnsure the IBM i (AS/400) monitoring interface is accessible.\n\n\n\n### Configuration\n\n#### Options\n\nConfiguration options for the as400 collector.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 1 | no |\n| endpoint | Connection endpoint. | dummy://localhost | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `ibm.d/as400.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ibm.d/as400.conf\n```\n\n##### Examples\n\n###### Basic\n\nBasic configuration example.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    endpoint: dummy://localhost\n\n```\n{% /details %}\n
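###### Cardinality limits and selectors\n\nA sketch for large systems that combines the limit and selector options documented in the Overview; the values are illustrative, not recommendations.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    endpoint: dummy://localhost\n    max_disks: 200\n    collect_disks_matching: "001* 002*"\n    collect_subsystems_matching: "QINTER QBATCH"\n\n```\n{% /details %}\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 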
An instance is uniquely identified by a set of labels.\n\n\n\n### Per activejob\n\nThese metrics refer to activejob instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| job_name | Job_name identifier |\n| job_status | Job_status identifier |\n| subsystem | Subsystem identifier |\n| job_type | Job_type identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| as400.activejob_cpu | cpu | percentage |\n| as400.activejob_resources | temp_storage | MiB |\n| as400.activejob_time | cpu_time, total_time | seconds |\n| as400.activejob_activity | disk_io, interactive_transactions | operations/s |\n| as400.activejob_threads | threads | threads |\n\n### Per disk\n\nThese metrics refer to disk instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| disk_unit | Disk_unit identifier |\n| disk_type | Disk_type identifier |\n| disk_model | Disk_model identifier |\n| hardware_status | Hardware_status identifier |\n| disk_serial_number | Disk_serial_number identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| as400.disk_busy | busy | percentage |\n| as400.disk_io_requests | read, write | requests/s |\n| as400.disk_space_usage | used | percentage |\n| as400.disk_capacity | available, used | gigabytes |\n| as400.disk_blocks | read, write | blocks/s |\n| as400.disk_ssd_health | life_remaining | percentage |\n| as400.disk_ssd_age | power_on_days | days |\n\n### Per httpserver\n\nThese metrics refer to httpserver instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| server | Server identifier |\n| function | Function identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| as400.http_server_connections | normal, ssl | connections |\n| as400.http_server_threads | active, idle | threads |\n| as400.http_server_requests | requests, responses, rejected | requests/s |\n| as400.http_server_bytes | received, sent | bytes/s |\n\n### Per jobqueue\n\nThese metrics refer to jobqueue instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| job_queue | Job_queue identifier |\n| library | Library identifier |\n| status | Status identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| as400.jobqueue_length | jobs | jobs |\n\n### Per messagequeue\n\nThese metrics refer to messagequeue instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| library | Library identifier |\n| queue | Queue identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| as400.message_queue_messages | total, informational, inquiry, diagnostic, escape, notify, sender_copy | messages |\n| as400.message_queue_severity | max | severity |\n\n### Per networkinterface\n\nThese metrics refer to networkinterface instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| interface | Interface identifier |\n| interface_type | Interface_type identifier |\n| connection_type | Connection_type identifier |\n| internet_address | Internet_address identifier |\n| network_address | Network_address identifier |\n| subnet_mask | Subnet_mask identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| as400.network_interface_status | active | status |\n| as400.network_interface_mtu | mtu | bytes |\n\n### Per IBM i (AS/400) instance\n\nThese metrics 
refer to the entire monitored instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netdata.plugin_ibm.as400_query_latency_fast | count_disks, count_http_servers, count_network_interfaces, detect_ibmi_version_primary, detect_ibmi_version_fallback, disk_instances, disk_instances_enhanced, disk_status, http_server_info, job_info, memory_pools, network_connections, network_interfaces, serial_number, system_name, system_activity, system_model, system_status, temp_storage_named, temp_storage_total, technology_refresh_level, active_job | ms |\n| netdata.plugin_ibm.as400_query_latency_slow | analyze_plan_cache, count_subsystems, subsystems, message_queue_aggregates, job_queues, output_queue_info, plan_cache_summary | ms |\n| netdata.plugin_ibm.as400_query_latency_batch | message_queue_totals, job_queue_totals, output_queue_totals | ms |\n\n### Per outputqueue\n\nThese metrics refer to outputqueue instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| library | Library identifier |\n| queue | Queue identifier |\n| status | Status identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| as400.output_queue_files | files | files |\n| as400.output_queue_writers | writers | writers |\n| as400.output_queue_status | released | state |\n\n### Per plancache\n\nThese metrics refer to plancache instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| metric | Metric identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| as400.plan_cache_summary | value | value |\n\n### Per queueoverview\n\nThese metrics refer to queueoverview instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| queue_type | Queue_type identifier |\n| item_type | Item_type identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| as400.queues_count | queues | queues |\n| as400.queued_items | items | items |\n\n### Per subsystem\n\nThese metrics refer to subsystem instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| subsystem | Subsystem identifier |\n| library | Library identifier |\n| status | Status identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| as400.subsystem_jobs | active, maximum | jobs |\n\n### Per IBM i (AS/400) instance\n\nThese metrics refer to the entire monitored instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| as400.cpu_utilization | utilization | percentage |\n| as400.cpu_utilization_entitled | utilization | percentage |\n| as400.cpu_configuration | configured | cpus |\n| as400.cpu_capacity | capacity | percentage |\n| as400.total_jobs | total | jobs |\n| as400.active_jobs_by_type | batch, interactive, active | jobs |\n| as400.job_queue_length | waiting | jobs |\n| as400.main_storage_size | total | bytes |\n| as400.temporary_storage | current, maximum | MiB |\n| as400.memory_pool_usage | machine, base, interactive, spool | bytes |\n| as400.memory_pool_defined | machine, base | bytes |\n| as400.memory_pool_reserved | machine, base | bytes |\n| as400.memory_pool_threads | machine, base | threads |\n| as400.memory_pool_max_threads | machine, base | threads |\n| as400.disk_busy_average | busy | percentage |\n| as400.system_asp_usage | used | percentage |\n| as400.system_asp_storage | total | MiB |\n| 
as400.total_auxiliary_storage | total | MiB |\n| as400.system_threads | active, per_processor | threads |\n| as400.network_connections | remote, total | connections |\n| as400.network_connection_states | listen, close_wait | connections |\n| as400.temp_storage_total | current, peak | bytes |\n| as400.system_activity_cpu_rate | average | percentage |\n| as400.system_activity_cpu_utilization | average, minimum, maximum | percentage |\n\n### Per tempstoragebucket\n\nThese metrics refer to tempstoragebucket instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| bucket | Bucket identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| as400.temp_storage_bucket | current, peak | bytes |\n\n",integration_type:"collector",id:"ibm.d.plugin-as400-IBM_i_(AS/400)",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/ibm.d/modules/as400/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ibm.d.plugin",module_name:"db2",monitored_instance:{name:"IBM DB2",link:"https://www.ibm.com/products/db2",categories:["data-collection.databases"],icon_filename:"ibm.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["db2"]},overview:'# IBM DB2\n\nPlugin: ibm.d.plugin\nModule: db2\n\n## Overview\n\nMonitors IBM DB2 databases using system catalog views and MON_GET_* table\nfunctions to expose connections, locking, buffer pool efficiency, tablespace\ncapacity, and workload performance metrics.\n\nDetailed charts are opt-in per object family through include/exclude lists.\nDefaults focus on engine activity (system connections, core buffer pools,\ncatalog tablespaces). Matching uses glob patterns that can target schema or\napplication names, with include rules taking precedence over excludes.\n\nWhen the number of matching objects exceeds the configured `max_*` limits,\nthe collector publishes deterministic top-N per-instance charts, aggregates\nthe remainder under `group="__other__"`, and logs a throttled warning so you\ncan refine selectors before cardinality runs away. Group charts (by schema,\napplication prefix, or buffer pool family) are always emitted so high-level\nvisibility is preserved even when individual instances are trimmed.\n\n\nThe collector connects to IBM DB2 and collects metrics via its monitoring interface.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn\'t support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\n### Prerequisites\n\n#### Enable monitoring interface\n\nEnsure the IBM DB2 monitoring interface is accessible.\n\n\n\n### Configuration\n\n#### Options\n\nConfiguration options for the db2 collector.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 1 | no |\n| endpoint | Connection endpoint. 
| dummy://localhost | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `ibm.d/db2.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ibm.d/db2.conf\n```\n\n##### Examples\n\n###### Basic\n\nBasic configuration example.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    endpoint: dummy://localhost\n\n```\n{% /details %}\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per bufferpool\n\nThese metrics refer to bufferpool instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| bufferpool | Bufferpool identifier |\n| page_size | Page_size identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| db2.bufferpool_instance_hit_ratio | overall | percentage |\n| db2.bufferpool_instance_detailed_hit_ratio | data, index, xda, column | percentage |\n| db2.bufferpool_instance_reads | logical, physical | reads/s |\n| db2.bufferpool_instance_data_reads | logical, physical | reads/s |\n| db2.bufferpool_instance_index_reads | logical, physical | reads/s |\n| db2.bufferpool_instance_pages | used, total | pages |\n| db2.bufferpool_instance_writes | writes | writes/s |\n\n### Per bufferpoolgroup\n\nThese metrics refer to bufferpoolgroup instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| group | Group identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| db2.bufferpool_group_hit_ratio | overall | percentage |\n| db2.bufferpool_group_detailed_hit_ratio | data, index, xda, column | percentage |\n| db2.bufferpool_group_reads | logical, physical | reads/s |\n| db2.bufferpool_group_data_reads | logical, physical | reads/s |\n| db2.bufferpool_group_index_reads | logical, physical | reads/s |\n| db2.bufferpool_group_pages | used, total | pages |\n| db2.bufferpool_group_writes | writes | writes/s |\n\n### Per connection\n\nThese metrics refer to connection instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| application_id | Application_id identifier |\n| application_name | Application_name identifier |\n| client_hostname | Client_hostname identifier |\n| client_ip | Client_ip identifier |\n| client_user | Client_user identifier |\n| state | State identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| db2.connection_state | state | state |\n| db2.connection_activity | read, written | rows/s |\n| db2.connection_wait_time | lock, log_disk, log_buffer, pool_read, pool_write, direct_read, direct_write, fcm_recv, fcm_send | milliseconds |\n| db2.connection_processing_time | routine, compile, section, commit, rollback | milliseconds |\n\n### Per connectiongroup\n\nThese metrics refer to connectiongroup instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| 
group | Group identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| db2.connection_group.count | count | connections |\n| db2.connection_group.state | state | state |\n| db2.connection_group.activity | read, written | rows/s |\n| db2.connection_group.wait_time | lock, log_disk, log_buffer, pool_read, pool_write, direct_read, direct_write, fcm_recv, fcm_send | milliseconds |\n| db2.connection_group.processing_time | routine, compile, section, commit, rollback | milliseconds |\n\n### Per database\n\nThese metrics refer to database instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| database | Database identifier |\n| status | Status identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| db2.database_instance_status | status | status |\n| db2.database_applications | applications | applications |\n\n### Per index\n\nThese metrics refer to index instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| index | Index identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| db2.index_usage | index, full | scans/s |\n\n### Per indexgroup\n\nThese metrics refer to indexgroup instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| group | Group identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| db2.index_group_usage | index, full | scans/s |\n\n### Per memorypool\n\nThese metrics refer to memorypool instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| pool_type | Pool_type identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| db2.memory_pool_usage | used | bytes |\n| db2.memory_pool_hwm | hwm | bytes |\n\n### Per memoryset\n\nThese metrics refer to memoryset instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| host | Host identifier |\n| database | Database identifier |\n| set_type | Set_type identifier |\n| member | Member identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| db2.memory_set_usage | used | bytes |\n| db2.memory_set_committed | committed | bytes |\n| db2.memory_set_high_water_mark | hwm | bytes |\n| db2.memory_set_additional_committed | additional | bytes |\n| db2.memory_set_percent_used_hwm | used_hwm | percentage |\n\n### Per prefetcher\n\nThese metrics refer to prefetcher instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| bufferpool | Bufferpool identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| db2.prefetcher_prefetch_ratio | ratio | percentage |\n| db2.prefetcher_cleaner_ratio | ratio | percentage |\n| db2.prefetcher_physical_reads | reads | reads/s |\n| db2.prefetcher_async_reads | reads | reads/s |\n| db2.prefetcher_wait_time | wait_time | milliseconds |\n| db2.prefetcher_unread_pages | unread | pages/s |\n\n### Per IBM DB2 instance\n\nThese metrics refer to the entire monitored instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| db2.service_health | connection, database | status |\n| db2.connections | total, active, executing, idle, max_allowed | connections |\n| db2.locking | waits, timeouts, escalations | events/s |\n| db2.deadlocks | deadlocks | deadlocks/s |\n| db2.lock_details | active, waiting_agents, 
memory_pages | locks |\n| db2.lock_wait_time | wait_time | milliseconds |\n| db2.sorting | sorts, overflows | sorts/s |\n| db2.row_activity | read, returned, modified | rows/s |\n| db2.bufferpool_hit_ratio | hits, misses | percentage |\n| db2.bufferpool_data_hit_ratio | hits, misses | percentage |\n| db2.bufferpool_index_hit_ratio | hits, misses | percentage |\n| db2.bufferpool_xda_hit_ratio | hits, misses | percentage |\n| db2.bufferpool_column_hit_ratio | hits, misses | percentage |\n| db2.bufferpool_reads | logical, physical | reads/s |\n| db2.bufferpool_data_reads | logical, physical | reads/s |\n| db2.bufferpool_index_reads | logical, physical | reads/s |\n| db2.bufferpool_xda_reads | logical, physical | reads/s |\n| db2.bufferpool_column_reads | logical, physical | reads/s |\n| db2.bufferpool_writes | writes | writes/s |\n| db2.log_space | used, available | bytes |\n| db2.log_utilization | utilization | percentage |\n| db2.log_io | reads, writes | operations/s |\n| db2.log_operations | commits, rollbacks, reads, writes | operations/s |\n| db2.log_timing | avg_commit, avg_read, avg_write | milliseconds |\n| db2.log_buffer_events | buffer_full | events/s |\n| db2.long_running_queries | total, warning, critical | queries |\n| db2.backup_status | status | status |\n| db2.backup_age | full, incremental | hours |\n| db2.federation_connections | active, idle | connections |\n| db2.federation_operations | rows_read, selects, waits | operations/s |\n| db2.database_status | active, inactive | status |\n| db2.database_count | active, inactive | databases |\n| db2.cpu_usage | user, system, idle, iowait | percentage |\n| db2.active_connections | active, total | connections |\n| db2.memory_usage | database, instance, bufferpool, shared_sort | MiB |\n| db2.sql_statements | selects, modifications | statements/s |\n| db2.transaction_activity | committed, aborted | transactions/s |\n| db2.time_spent | direct_read, direct_write, pool_read, pool_write | milliseconds |\n\n### Per table\n\nThese metrics refer to table instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| table | Table identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| db2.table_size | data, index, long_obj | bytes |\n| db2.table_activity | read, written | rows/s |\n\n### Per tablegroup\n\nThese metrics refer to tablegroup instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| group | Group identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| db2.table_group_size | data, index, long_obj | bytes |\n| db2.table_group_activity | read, written | rows/s |\n\n### Per tableio\n\nThese metrics refer to tableio instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| table | Table identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| db2.table_io_scans | scans | scans/s |\n| db2.table_io_rows | read | rows/s |\n| db2.table_io_activity | inserts, updates, deletes | operations/s |\n| db2.table_io_overflow | overflow | accesses/s |\n\n### Per tablespace\n\nThese metrics refer to tablespace instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| tablespace | Tablespace identifier |\n| type | Type identifier |\n| content_type | Content_type identifier |\n| state | State identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| db2.tablespace_usage | 
used | percentage |\n| db2.tablespace_size | used, free | bytes |\n| db2.tablespace_usable_size | total, usable | bytes |\n| db2.tablespace_state | state | state |\n\n### Per tablespacegroup\n\nThese metrics refer to tablespacegroup instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| group | Group identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| db2.tablespace_group_usage | used | percentage |\n| db2.tablespace_group_size | used, free | bytes |\n| db2.tablespace_group_usable_size | total, usable | bytes |\n| db2.tablespace_group_state | state | state |\n\n",integration_type:"collector",id:"ibm.d.plugin-db2-IBM_DB2",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/ibm.d/modules/db2/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ibm.d.plugin",module_name:"mq",monitored_instance:{name:"IBM MQ",link:"https://www.ibm.com/products/mq",categories:["data-collection.applications"],icon_filename:"ibm-mq.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["mq"]},overview:"# IBM MQ\n\nPlugin: ibm.d.plugin\nModule: mq\n\n## Overview\n\nMonitors IBM MQ queue managers, queues, channels, and topics\nusing the PCF (Programmable Command Format) protocol.\n\nBy default the collector tracks the critical system queues `SYSTEM.DEAD.LETTER.QUEUE`,\n`SYSTEM.ADMIN.COMMAND.QUEUE`, and `SYSTEM.ADMIN.STATISTICS.QUEUE`. All other queues are\nopt-in via the `include_queues` list, with `exclude_queues` removing noisy patterns such as\n`SYSTEM.*` or `AMQ.*`. Include patterns take precedence over excludes so you can safely\nmonitor individual system queues while dropping the broader wildcard.\n\nPer-queue charts are bounded by `max_queues` (default 50). When more queues are discovered,\nthe collector exports the busiest ones individually, rolls the remainder into an\naggregated `__other__` dimension, and logs a throttled warning listing the overflowed\ngroups. Parallel queue-group charts summarise depth, traffic, and backlog per naming\nprefix (first two dot-separated segments, collapsing all `SYSTEM.*` queues together), so\nhigh-level visibility is never lost even when detailed charts are trimmed.\n\n\nThe collector connects to IBM MQ and collects metrics via its monitoring interface.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Enable monitoring interface\n\nEnsure the IBM MQ monitoring interface is accessible.\n\n\n\n### Configuration\n\n#### Options\n\nConfiguration options for the mq collector.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 1 | no |\n| endpoint | Connection endpoint. 
| dummy://localhost | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `ibm.d/mq.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ibm.d/mq.conf\n```\n\n##### Examples\n\n###### Basic\n\nBasic configuration example.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    endpoint: dummy://localhost\n\n```\n{% /details %}\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per channel\n\nThese metrics refer to channel instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| channel | Channel identifier |\n| type | Type identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mq.channel.status | inactive, binding, starting, running, stopping, retrying, stopped, requesting, paused, disconnected, initializing, switching | status |\n| mq.channel.messages | messages | messages/s |\n| mq.channel.bytes | bytes | bytes/s |\n| mq.channel.batches | batches | batches/s |\n| mq.channel.batch_size | batch_size | messages |\n| mq.channel.batch_interval | batch_interval | milliseconds |\n| mq.channel.intervals | disc_interval, hb_interval, keep_alive_interval | seconds |\n| mq.channel.short_retry_count | short_retry | retries |\n| mq.channel.long_retry_interval | long_retry | seconds |\n| mq.channel.max_msg_length | max_msg_length | bytes |\n| mq.channel.sharing_conversations | sharing_conversations | conversations |\n| mq.channel.network_priority | network_priority | priority |\n| mq.channel.buffer_counts | sent, received | buffers |\n| mq.channel.current_messages | current | messages |\n| mq.channel.xmitq_time | xmitq_time | milliseconds |\n| mq.channel.mca_status | mca_status | status |\n| mq.channel.indoubt_status | indoubt_status | status |\n| mq.channel.ssl_key_resets | ssl_key_resets | resets |\n| mq.channel.npm_speed | npm_speed | speed |\n| mq.channel.current_sharing_convs | current_sharing | conversations |\n\n### Per channelstatistics\n\nThese metrics refer to channelstatistics instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| channel | Channel identifier |\n| type | Type identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mq.channel_stats.messages | messages | messages/s |\n| mq.channel_stats.bytes | bytes | bytes/s |\n| mq.channel_stats.batches | full_batches, incomplete_batches | batches/s |\n| mq.channel_stats.batch_size | avg_batch_size | messages |\n| mq.channel_stats.put_retries | put_retries | retries/s |\n\n### Per listener\n\nThese metrics refer to listener instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| listener | Listener identifier |\n| port | Port identifier |\n| ip_address | Ip_address identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| 
mq.listener.status | stopped, starting, running, stopping, retrying | status |\n| mq.listener.backlog | backlog | connections |\n| mq.listener.uptime | uptime | seconds |\n\n### Per mqistatistics\n\nThese metrics refer to mqistatistics instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| queue_manager | Queue_manager identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mq.mqi_stats.opens | opens_total, opens_failed | operations/s |\n| mq.mqi_stats.closes | closes_total, closes_failed | operations/s |\n| mq.mqi_stats.inqs | inqs_total, inqs_failed | operations/s |\n| mq.mqi_stats.sets | sets_total, sets_failed | operations/s |\n\n### Per queue\n\nThese metrics refer to queue instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| queue | Queue identifier |\n| type | Type identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mq.queue.depth | current, max | messages |\n| mq.queue.depth_percentage | percentage | percentage |\n| mq.queue.messages | enqueued, dequeued | messages/s |\n| mq.queue.connections | input, output | connections |\n| mq.queue.high_depth | high_depth | messages |\n| mq.queue.uncommitted_msgs | uncommitted | messages |\n| mq.queue.file_size | current, max | bytes |\n| mq.queue.last_activity | since_last_get, since_last_put | seconds |\n| mq.queue.oldest_msg_age | oldest_msg_age | seconds |\n| mq.queue.time_indicators | short_period, long_period | microseconds |\n| mq.queue.service_interval | service_interval | milliseconds |\n| mq.queue.inhibit_status | inhibit_get, inhibit_put | status |\n| mq.queue.priority | def_priority | priority |\n| mq.queue.message_persistence | persistent, non_persistent | boolean |\n| mq.queue.retention_interval | retention_interval | hours |\n| mq.queue.triggers | trigger_depth, trigger_type | messages |\n| mq.queue.backout_threshold | backout_threshold | retries |\n| mq.queue.max_msg_length | max_msg_length | bytes |\n| mq.queue.scope | queue_manager, cell | boolean |\n| mq.queue.usage | normal, transmission | boolean |\n| mq.queue.msg_delivery_sequence | priority, fifo | boolean |\n| mq.queue.harden_get_backout | enabled, disabled | boolean |\n\n### Per queuegroup\n\nThese metrics refer to queuegroup instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| group | Group identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mq.queue_group.depth | current, max | messages |\n| mq.queue_group.depth_percentage | percentage | percentage |\n| mq.queue_group.messages | enqueued, dequeued | messages/s |\n| mq.queue_group.connections | input, output | connections |\n| mq.queue_group.uncommitted_msgs | uncommitted | messages |\n| mq.queue_group.file_size | current, max | bytes |\n| mq.queue_group.oldest_msg_age | oldest_msg_age | seconds |\n\n### Per IBM MQ instance\n\nThese metrics refer to the entire monitored instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mq.qmgr.status | status | status |\n| mq.qmgr.connection_count | connections | connections |\n| mq.qmgr.uptime | uptime | seconds |\n| mq.queues.overview | monitored, excluded, invisible, failed | queues |\n| mq.channels.overview | monitored, excluded, invisible, failed | channels |\n| mq.topics.overview | monitored, excluded, invisible, failed | topics |\n| mq.listeners.overview | monitored, excluded, 
invisible, failed | listeners |\n\n### Per IBM MQ instance\n\nThese metrics refer to the entire monitored instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mq.qmgr.cpu_usage | user, system | percentage |\n| mq.qmgr.memory_usage | total | bytes |\n| mq.qmgr.ram_usage | total | bytes |\n| mq.qmgr.log_utilization | used | percentage |\n| mq.qmgr.log_file_size | size | bytes |\n| mq.qmgr.log_write_rate | rate | bytes/s |\n\n### Per queuestatistics\n\nThese metrics refer to queuestatistics instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| queue | Queue identifier |\n| type | Type identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mq.queue_stats.depth_min_max | min_depth, max_depth | messages |\n| mq.queue_stats.avg_queue_time | non_persistent, persistent | microseconds |\n| mq.queue_stats.time_indicators | short_period, long_period | microseconds |\n| mq.queue_stats.operations | puts_non_persistent, puts_persistent, gets_non_persistent, gets_persistent, put1s, browses | operations/s |\n| mq.queue_stats.bytes | put_bytes_non_persistent, put_bytes_persistent, get_bytes_non_persistent, get_bytes_persistent, browse_bytes | bytes/s |\n| mq.queue_stats.failures | puts_failed, put1s_failed, gets_failed, browses_failed | failures/s |\n| mq.queue_stats.message_lifecycle | expired, purged, not_queued | messages/s |\n\n### Per subscription\n\nThese metrics refer to subscription instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| subscription | Subscription identifier |\n| topic | Topic identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mq.subscription.messages | pending | messages |\n| mq.subscription.last_message_age | age | seconds |\n\n### Per topic\n\nThese metrics refer to topic instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| topic | Topic identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mq.topic.publishers | publishers | publishers |\n| mq.topic.subscribers | subscribers | subscribers |\n| mq.topic.messages | messages | messages/s |\n| mq.topic.time_since_last_message | time_since_last_msg | seconds |\n\n",integration_type:"collector",id:"ibm.d.plugin-mq-IBM_MQ",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/ibm.d/modules/mq/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ibm.d.plugin",module_name:"websphere_jmx",monitored_instance:{name:"IBM WebSphere JMX",link:"https://www.ibm.com/products/websphere-application-server",categories:["data-collection.applications"],icon_filename:""},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["websphere_jmx"]},overview:"# IBM WebSphere JMX\n\nPlugin: ibm.d.plugin\nModule: websphere_jmx\n\n## Overview\n\nCollects JVM, thread pool, and middleware metrics from IBM WebSphere Application Server\nvia the embedded JMX bridge helper.\n\n\nThe collector connects to IBM WebSphere JMX and collects metrics via its monitoring interface.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for 
this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Enable monitoring interface\n\nEnsure the IBM WebSphere JMX monitoring interface is accessible.\n\n\n\n### Configuration\n\n#### Options\n\nConfiguration options for the websphere_jmx collector.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 1 | no |\n| endpoint | Connection endpoint. | dummy://localhost | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `ibm.d/websphere_jmx.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ibm.d/websphere_jmx.conf\n```\n\n##### Examples\n\n###### Basic\n\nBasic configuration example.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    endpoint: dummy://localhost\n\n```\n{% /details %}\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per applications\n\nThese metrics refer to applications instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| application | Application identifier |\n| module | Module identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_jmx.app_requests | requests | requests |\n| websphere_jmx.app_response_time | response_time | milliseconds |\n| websphere_jmx.app_sessions_active | active | sessions |\n| websphere_jmx.app_sessions_live | live | sessions |\n| websphere_jmx.app_session_events | creates, invalidates | sessions |\n| websphere_jmx.app_transactions | committed, rolledback | transactions |\n\n### Per jca\n\nThese metrics refer to jca instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| pool | Pool identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_jmx.jca_pool_size | size | connections |\n| websphere_jmx.jca_pool_usage | active, free | connections |\n| websphere_jmx.jca_pool_wait_time | wait | milliseconds |\n| websphere_jmx.jca_pool_use_time | use | milliseconds |\n| websphere_jmx.jca_pool_connections | created, destroyed | connections |\n| websphere_jmx.jca_pool_waiting_threads | waiting | threads |\n\n### Per jdbc\n\nThese metrics refer to jdbc instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| pool | Pool identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_jmx.jdbc_pool_size | size | connections |\n| websphere_jmx.jdbc_pool_usage | active, free | connections |\n| websphere_jmx.jdbc_pool_wait_time | wait | milliseconds |\n| 
websphere_jmx.jdbc_pool_use_time | use | milliseconds |\n| websphere_jmx.jdbc_pool_connections | created, destroyed | connections |\n| websphere_jmx.jdbc_pool_waiting_threads | waiting | threads |\n\n### Per jms\n\nThese metrics refer to jms instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| destination | Destination identifier |\n| destination_type | Destination_type identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_jmx.jms_messages_current | current | messages |\n| websphere_jmx.jms_messages_pending | pending | messages |\n| websphere_jmx.jms_messages_total | total | messages |\n| websphere_jmx.jms_consumers | consumers | consumers |\n\n### Per IBM WebSphere JMX instance\n\nThese metrics refer to the entire monitored instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_jmx.jvm_heap_memory | used, committed, max | bytes |\n| websphere_jmx.jvm_heap_usage | usage | percentage |\n| websphere_jmx.jvm_nonheap_memory | used, committed | bytes |\n| websphere_jmx.jvm_gc_count | collections | collections |\n| websphere_jmx.jvm_gc_time | time | milliseconds |\n| websphere_jmx.jvm_threads | total, daemon | threads |\n| websphere_jmx.jvm_thread_states | peak, started | threads |\n| websphere_jmx.jvm_classes | loaded, unloaded | classes |\n| websphere_jmx.jvm_process_cpu_usage | cpu | percentage |\n| websphere_jmx.jvm_uptime | uptime | seconds |\n\n### Per threadpools\n\nThese metrics refer to threadpools instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| pool | Pool identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_jmx.threadpool_size | size, max | threads |\n| websphere_jmx.threadpool_active | active | threads |\n\n",integration_type:"collector",id:"ibm.d.plugin-websphere_jmx-IBM_WebSphere_JMX",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/ibm.d/modules/websphere/jmx/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ibm.d.plugin",module_name:"websphere_mp",monitored_instance:{name:"IBM WebSphere MicroProfile",link:"https://www.ibm.com/products/websphere-application-server",categories:["data-collection.applications"],icon_filename:"ibm.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["websphere_mp"]},overview:"# IBM WebSphere MicroProfile\n\nPlugin: ibm.d.plugin\nModule: websphere_mp\n\n## Overview\n\nCollects JVM, vendor, and REST endpoint metrics from WebSphere Liberty / Open Liberty\nservers via the MicroProfile Metrics (Prometheus/OpenMetrics) endpoint.\n\n\nThe collector connects to IBM WebSphere MicroProfile and collects metrics via its monitoring interface.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Enable monitoring interface\n\nEnsure the IBM WebSphere MicroProfile monitoring interface is 
accessible.\n\n\n\n### Configuration\n\n#### Options\n\nConfiguration options for the websphere_mp collector.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 1 | no |\n| endpoint | Connection endpoint. | dummy://localhost | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `ibm.d/websphere_mp.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ibm.d/websphere_mp.conf\n```\n\n##### Examples\n\n###### Basic\n\nBasic configuration example.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    endpoint: dummy://localhost\n\n```\n{% /details %}\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IBM WebSphere MicroProfile instance\n\nThese metrics refer to the entire monitored instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_mp.cpu_usage | process, utilization | percentage |\n| websphere_mp.cpu_time | total | seconds |\n\n### Per IBM WebSphere MicroProfile instance\n\nThese metrics refer to the entire monitored instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_mp.jvm_memory_heap_usage | used, free | bytes |\n| websphere_mp.jvm_memory_heap_committed | committed | bytes |\n| websphere_mp.jvm_memory_heap_max | limit | bytes |\n| websphere_mp.jvm_heap_utilization | utilization | percentage |\n| websphere_mp.jvm_gc_collections | rate | collections/s |\n| websphere_mp.jvm_gc_time | total, per_cycle | milliseconds |\n| websphere_mp.jvm_threads_current | daemon, other | threads |\n| websphere_mp.jvm_threads_peak | peak | threads |\n\n### Per restendpoint\n\nThese metrics refer to restendpoint instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| method | Method identifier |\n| endpoint | Endpoint identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_mp.rest_requests | requests | requests/s |\n| websphere_mp.rest_response_time | average | milliseconds |\n\n### Per IBM WebSphere MicroProfile instance\n\nThese metrics refer to the entire monitored instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_mp.threadpool_usage | active, idle | threads |\n| websphere_mp.threadpool_size | size | threads |\n\n",integration_type:"collector",id:"ibm.d.plugin-websphere_mp-IBM_WebSphere_MicroProfile",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/ibm.d/modules/websphere/mp/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ibm.d.plugin",module_name:"websphere_pmi",monitored_instance:{name:"IBM WebSphere 
PMI",link:"https://www.ibm.com/docs/en/was",categories:["data-collection.applications"],icon_filename:"ibm.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["websphere_pmi"]},overview:"# IBM WebSphere PMI\n\nPlugin: ibm.d.plugin\nModule: websphere_pmi\n\n## Overview\n\nCollects WebSphere Application Server performance metrics via the PerfServlet (PMI) interface,\ncovering JVM, thread pools, JDBC/JMS resources, applications, and clustering information.\n\n\nThe collector connects to IBM WebSphere PMI and collects metrics via its monitoring interface.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Enable monitoring interface\n\nEnsure the IBM WebSphere PMI monitoring interface is accessible.\n\n\n\n### Configuration\n\n#### Options\n\nConfiguration options for the websphere_pmi collector.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 1 | no |\n| endpoint | Connection endpoint. | dummy://localhost | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `ibm.d/websphere_pmi.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ibm.d/websphere_pmi.conf\n```\n\n##### Examples\n\n###### Basic\n\nBasic configuration example.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: local\n    endpoint: dummy://localhost\n\n```\n{% /details %}\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per alarmmanager\n\nThese metrics refer to alarmmanager instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n| manager | Manager identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.alarm_manager_events | created, cancelled, fired | events/s |\n\n### Per dynamiccache\n\nThese metrics refer to dynamiccache instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n| cache | Cache identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.dynamic_cache_in_memory | entries | entries |\n| websphere_pmi.dynamic_cache_capacity | max_entries | entries |\n\n### Per enterprisebeans\n\nThese metrics refer to enterprisebeans instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n| bean | Bean identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.ejb_operations | create, remove, activate, passivate, instantiate, store, load | operations/s |\n| websphere_pmi.ejb_messages | received, backout | messages/s |\n| websphere_pmi.ejb_pool | ready, live, pooled, active_method, passive, server_session_pool, method_ready, async_queue | beans |\n| websphere_pmi.ejb_time | activation, passivation, create, remove, load, store, method_response, wait, async_wait, read_lock, write_lock | milliseconds |\n\n### Per extensionregistry\n\nThese metrics refer to extensionregistry instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.extension_registry_requests | requests, hits, displacements | events/s |\n| websphere_pmi.extension_registry_hit_rate | hit_rate | percentage |\n\n### Per hamanager\n\nThese metrics refer to hamanager instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.ha_manager_groups | local | groups |\n| websphere_pmi.ha_manager_bulletin_board | subjects, subscriptions, local_subjects, local_subscriptions | items |\n| websphere_pmi.ha_manager_rebuild_time | group_state, bulletin_board | milliseconds |\n\n### Per jcapool\n\nThese metrics refer to jcapool instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n| provider | Provider identifier |\n| pool | Pool identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.jca_pool_operations | create, close, allocate, freed, faults | operations/s |\n| websphere_pmi.jca_pool_managed | managed_connections, connection_handles | resources |\n| websphere_pmi.jca_pool_utilization | percent_used, percent_maxed | percentage |\n| websphere_pmi.jca_pool_waiting | waiting_threads | threads |\n\n### Per jdbcpool\n\nThese metrics refer to jdbcpool instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier 
|\n| server | Server identifier |\n| pool | Pool identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.jdbc_pool_usage | percent_used, percent_maxed | percentage |\n| websphere_pmi.jdbc_pool_waiting | waiting_threads | threads |\n| websphere_pmi.jdbc_pool_connections | managed, handles | connections |\n| websphere_pmi.jdbc_pool_operations | created, closed, allocated, returned, faults, prep_stmt_cache_discard | operations/s |\n| websphere_pmi.jdbc_pool_time | use, wait, jdbc | milliseconds |\n\n### Per jmsqueue\n\nThese metrics refer to jmsqueue instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n| engine | Engine identifier |\n| destination | Destination identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.jms_queue_messages_produced | total, best_effort, express, reliable_nonpersistent, reliable_persistent, assured_persistent | messages/s |\n| websphere_pmi.jms_queue_messages_consumed | total, best_effort, express, reliable_nonpersistent, reliable_persistent, assured_persistent, expired | messages/s |\n| websphere_pmi.jms_queue_clients | local_producers, local_producer_attaches, local_consumers, local_consumer_attaches | clients |\n| websphere_pmi.jms_queue_storage | available, unavailable, oldest_age | messages |\n| websphere_pmi.jms_queue_wait_time | aggregate, local | milliseconds |\n\n### Per jmsstore\n\nThese metrics refer to jmsstore instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n| engine | Engine identifier |\n| section | Section identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.jms_store_cache | add_stored, add_not_stored, stored_current, stored_bytes, not_stored_current, not_stored_bytes, discard_count, discard_bytes | events |\n| websphere_pmi.jms_store_datastore | insert_batches, update_batches, delete_batches, insert_count, update_count, delete_count, open_count, abort_count, transaction_ms | events/s |\n| websphere_pmi.jms_store_transactions | global_start, global_commit, global_abort, global_indoubt, local_start, local_commit, local_abort | transactions/s |\n| websphere_pmi.jms_store_expiry | index_items | items |\n\n### Per jmstopic\n\nThese metrics refer to jmstopic instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n| engine | Engine identifier |\n| destination | Destination identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.jms_topic_publications | assured, best_effort, express | messages/s |\n| websphere_pmi.jms_topic_subscription_hits | assured, best_effort, express | events/s |\n| websphere_pmi.jms_topic_subscriptions | durable_local | subscriptions |\n| websphere_pmi.jms_topic_events | incomplete_publications, publisher_attaches, subscriber_attaches | events/s |\n| websphere_pmi.jms_topic_age | local_oldest | milliseconds |\n\n### Per IBM WebSphere PMI instance\n\nThese metrics refer to the entire monitored instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.jvm_heap_usage | used, free | bytes |\n| websphere_pmi.jvm_heap_committed | committed | bytes |\n| 
websphere_pmi.jvm_heap_max | limit | bytes |\n| websphere_pmi.jvm_uptime | uptime | seconds |\n| websphere_pmi.jvm_cpu | usage | percentage |\n| websphere_pmi.jvm_gc_collections | collections | collections/s |\n| websphere_pmi.jvm_gc_time | total | milliseconds |\n| websphere_pmi.jvm_threads | daemon, other | threads |\n| websphere_pmi.jvm_threads_peak | peak | threads |\n\n### Per orb\n\nThese metrics refer to orb instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.orb_concurrent | concurrent_requests | requests |\n| websphere_pmi.orb_requests | requests | requests/s |\n\n### Per objectpool\n\nThese metrics refer to objectpool instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n| pool | Pool identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.object_pool_operations | created | operations/s |\n| websphere_pmi.object_pool_size | allocated, returned, idle | objects |\n\n### Per pmiwebservicemodule\n\nThese metrics refer to pmiwebservicemodule instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n| module | Module identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.pmi_web_service_module_services | loaded | services |\n\n### Per portlet\n\nThese metrics refer to portlet instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n| portlet | Portlet identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.portlet_requests | requests | requests/s |\n| websphere_pmi.portlet_concurrent | concurrent | requests |\n| websphere_pmi.portlet_errors | errors | errors/s |\n| websphere_pmi.portlet_response_time | render, action, process_event, serve_resource | milliseconds |\n\n### Per portletapplication\n\nThese metrics refer to portletapplication instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.portlet_application_loaded | loaded | portlets |\n\n### Per schedulers\n\nThese metrics refer to schedulers instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n| scheduler | Scheduler identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.scheduler_activity | finished, failures, polls | events/s |\n\n### Per securityauth\n\nThese metrics refer to securityauth instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.security_auth_counts | web, tai, identity, basic, token, jaas_identity, jaas_basic, jaas_token, rmi | events/s |\n\n### Per securityauthz\n\nThese metrics refer to securityauthz instances.\n\nLabels:\n\n| Label      | Description     
|\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.security_authz_time | web, ejb, admin, cwwja | milliseconds |\n\n### Per sessionmanager\n\nThese metrics refer to sessionmanager instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n| app | App identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.session_manager_active | active, live | sessions |\n| websphere_pmi.session_manager_events | created, invalidated, timeout_invalidations, affinity_breaks, cache_discards, no_room, activate_non_exist | events/s |\n\n### Per IBM WebSphere PMI instance\n\nThese metrics refer to the entire monitored instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.cpu_utilization | utilization | percentage |\n\n### Per IBM WebSphere PMI instance\n\nThese metrics refer to the entire monitored instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.system_data_usage | cpu_since_last, free_memory | value |\n\n### Per threadpool\n\nThese metrics refer to threadpool instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| name | Name identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.threadpool_usage | active, size | threads |\n\n### Per transactionmanager\n\nThese metrics refer to transactionmanager instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.transaction_counts | global_begun, global_committed, global_rolled_back, global_timeout, global_involved, optimizations, local_begun, local_committed, local_rolled_back, local_timeout | transactions/s |\n| websphere_pmi.transaction_active | global, local | transactions |\n| websphere_pmi.transaction_time | global_total, global_prepare, global_commit, global_before_completion, local_total, local_commit, local_before_completion | milliseconds |\n\n### Per url\n\nThese metrics refer to url instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n| url | Url identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.url_requests | requests | requests/s |\n| websphere_pmi.url_time | service, async | milliseconds |\n\n### Per webapp\n\nThese metrics refer to webapp instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n| app | App identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.webapp_load | loaded_servlets, reloads | events |\n\n### Per webservices\n\nThese metrics refer to webservices instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n| service | Service identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| 
websphere_pmi.web_services_loaded | loaded | services |\n\n### Per webservicesgateway\n\nThese metrics refer to webservicesgateway instances.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | Node identifier |\n| server | Server identifier |\n| gateway | Gateway identifier |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| websphere_pmi.web_services_gateway_requests | synchronous, synchronous_responses, asynchronous, asynchronous_responses | requests/s |\n\n",integration_type:"collector",id:"ibm.d.plugin-websphere_pmi-IBM_WebSphere_PMI",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/ibm.d/modules/websphere/pmi/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"idlejitter.plugin",module_name:"idlejitter.plugin",monitored_instance:{name:"Idle OS Jitter",link:"",categories:["data-collection.synthetic-testing"],icon_filename:"syslog.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["latency","jitter"]},overview:"# Idle OS Jitter\n\nPlugin: idlejitter.plugin\nModule: idlejitter.plugin\n\n## Overview\n\nMonitor delays in timing for user processes caused by scheduling limitations to optimize the system to run latency-sensitive applications with minimal jitter, improving consistency and quality of service.\n\n\nA thread is spawned that requests to sleep for a fixed amount of time. When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep is the idle jitter. This is done dozens of times per second to ensure we have a representative sample.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration will run by default on all supported systems.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThis integration only supports a single configuration option, and most users will not need to change it.\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| loop time | Specifies the target time for the data collection thread to sleep, measured in milliseconds. | 20ms | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\n
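For the idlejitter plugin itself, a minimal sketch would be (the `[plugin:idlejitter]` section name is an assumption inferred from the plugin name; the option name and `20ms` default are taken from the table above):\n\n```ini\n[plugin:idlejitter]\n    # target sleep per measurement iteration; lowering it increases sampling frequency\n    loop time = 20ms\n```\n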
You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Idle OS Jitter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.idlejitter | min, max, average | microseconds lost/s |\n\n",integration_type:"collector",id:"idlejitter.plugin-idlejitter.plugin-Idle_OS_Jitter",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/idlejitter.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"ioping.plugin",module_name:"ioping.plugin",monitored_instance:{name:"IOPing",link:"https://github.com/koct9i/ioping",categories:["data-collection.synthetic-testing"],icon_filename:"syslog.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# IOPing\n\nPlugin: ioping.plugin\nModule: ioping.plugin\n\n## Overview\n\nMonitor IOPing metrics for efficient disk I/O latency tracking. Keep track of read/write speeds, latency, and error rates for optimized disk operations.\n\nThe plugin uses the `ioping` command.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Install ioping\n\nYou can install the command by passing the argument `install` to the plugin (`/usr/libexec/netdata/plugins.d/ioping.plugin install`).\n\n\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Data collection frequency. | 1s | no |\n| destination | The directory/file/device to ioping. |  | yes |\n| request_size | The request size in bytes to ioping the destination (symbolic modifiers are supported). | 4k | no |\n| ioping_opts | Options passed to the `ioping` command. | -T 1000000 | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `ioping.conf`.\n\nThe file format is POSIX shell script. Generally, the structure is:\n\n```sh\nOPTION_1="some value"\nOPTION_2="some other value"\n```\n
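A fuller sketch combining the documented options (the values are illustrative assumptions; only `destination` is required, and the other two simply restate the defaults from the table above):\n\n```sh\n# ioping the given device with 4k requests, keeping the default extra options\ndestination="/dev/sda"\nrequest_size="4k"\nioping_opts="-T 1000000"\n```\n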
You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ioping.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\nThis example has the minimum configuration necessary to have the plugin running.\n\n{% details open=true summary="Config" %}\n```sh\ndestination="/dev/sda"\n\n```\n{% /details %}\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ ioping_disk_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ioping.conf) | ioping.latency | average I/O latency over the last 10 seconds |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ioping.latency | latency | microseconds |\n\n",integration_type:"collector",id:"ioping.plugin-ioping.plugin-IOPing",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/ioping.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"macos.plugin",module_name:"mach_smi",monitored_instance:{name:"macOS",link:"https://www.apple.com/macos",categories:["data-collection.operating-systems"],icon_filename:"macos.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["macos","apple","darwin"]},overview:"# macOS\n\nPlugin: macos.plugin\nModule: mach_smi\n\n## Overview\n\nMonitor macOS metrics for efficient operating system performance.\n\nThe plugin uses three different methods to collect data:\n  - The function `sysctlbyname` is called to collect network, swap, loadavg, and boot time.\n  - The function `host_statistic` is called to collect CPU and virtual memory data.\n  - The function `IOServiceGetMatchingServices` is called to collect storage information.\n\n\nThis collector is only supported on the following platforms:\n\n- macOS\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThere are three sections in the file that you can configure:\n\n- `[plugin:macos:sysctl]` - Enable or disable monitoring for network, swap, loadavg, and boot time.\n- `[plugin:macos:mach_smi]` - Enable or disable monitoring for CPU and virtual memory.\n- `[plugin:macos:iokit]` - Enable or disable monitoring for storage devices.\n\n
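As a sketch of how these three sections sit in `netdata.conf` (the options shown are a small, arbitrary selection from the table below, and the values are just the documented defaults):\n\n```ini\n[plugin:macos:sysctl]\n    system swap = yes\n\n[plugin:macos:mach_smi]\n    cpu utilization = yes\n\n[plugin:macos:iokit]\n    disk i/o = yes\n```\n\n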
{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enable load average | Enable or disable monitoring of load average metrics (load1, load5, load15). | yes | no |\n| system swap | Enable or disable monitoring of system swap metrics (free, used). | yes | no |\n| bandwidth | Enable or disable monitoring of network bandwidth metrics (received, sent). | yes | no |\n| ipv4 TCP packets | Enable or disable monitoring of IPv4 TCP total packets metrics (received, sent). | yes | no |\n| ipv4 TCP errors | Enable or disable monitoring of IPv4 TCP packets metrics (Input Errors, Checksum, Retransmission segments). | yes | no |\n| ipv4 TCP handshake issues | Enable or disable monitoring of IPv4 TCP handshake metrics (Established Resets, Active Opens, Passive Opens, Attempt Fails). | yes | no |\n| ECN packets | Enable or disable monitoring of ECN statistics metrics (InCEPkts, InNoECTPkts). | auto | no |\n| TCP SYN cookies | Enable or disable monitoring of TCP SYN cookies metrics (received, sent, failed). | auto | no |\n| TCP out-of-order queue | Enable or disable monitoring of TCP out-of-order queue metrics (inqueue). | auto | no |\n| TCP connection aborts | Enable or disable monitoring of TCP connection aborts metrics (Bad Data, User closed, No memory, Timeout). | auto | no |\n| ipv4 UDP packets | Enable or disable monitoring of ipv4 UDP packets metrics (sent, received). | yes | no |\n| ipv4 UDP errors | Enable or disable monitoring of ipv4 UDP errors metrics (Received Buffer error, Input Errors, No Ports, IN Checksum Errors, Ignore Multi). | yes | no |\n| ipv4 icmp packets | Enable or disable monitoring of IPv4 ICMP packets metrics (sent, received, in error, OUT error, IN Checksum error). | yes | no |\n| ipv4 icmp messages | Enable or disable monitoring of ipv4 ICMP messages metrics (I/O messages, I/O Errors, In Checksum). | yes | no |\n| ipv4 packets | Enable or disable monitoring of ipv4 packets metrics (received, sent, forwarded, delivered). | yes | no |\n| ipv4 fragments sent | Enable or disable monitoring of IPv4 fragments sent metrics (ok, fails, creates). | yes | no |\n| ipv4 fragments assembly | Enable or disable monitoring of IPv4 fragments assembly metrics (ok, failed, all). | yes | no |\n| ipv4 errors | Enable or disable monitoring of IPv4 errors metrics (I/O discard, I/O HDR errors, In Addr errors, In Unknown protos, OUT No Routes). | yes | no |\n| ipv6 packets | Enable or disable monitoring of IPv6 packets metrics (received, sent, forwarded, delivered). | auto | no |\n| ipv6 fragments sent | Enable or disable monitoring of IPv6 fragments sent metrics (ok, failed, all). | auto | no |\n| ipv6 fragments assembly | Enable or disable monitoring of IPv6 fragments assembly metrics (ok, failed, timeout, all). | auto | no |\n| ipv6 errors | Enable or disable monitoring of IPv6 errors metrics (I/O Discards, In Hdr Errors, In Addr Errors, In Truncated Packets, I/O No Routes). | auto | no |\n| icmp | Enable or disable monitoring of ICMP metrics (sent, received). | auto | no |\n| icmp redirects | Enable or disable monitoring of ICMP redirects metrics (received, sent). | auto | no |\n| icmp errors | Enable or disable monitoring of ICMP metrics (I/O Errors, In Checksums, In Destination Unreachable, In Packet too big, In Time Exceeds, In Parm Problem, Out Dest Unreachable, Out Time Exceeds, Out Parm Problems). | auto | no |\n| icmp echos | Enable or disable monitoring of ICMP echos metrics (I/O Echos, I/O Echo Reply). 
| auto | no |\n| icmp router | Enable or disable monitoring of ICMP router metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp neighbor | Enable or disable monitoring of ICMP neighbor metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp types | Enable or disable monitoring of ICMP types metrics (I/O Type1, I/O Type128, I/O Type129, Out Type133, Out Type135, In Type136, Out Type145). | auto | no |\n| space usage for all disks | Enable or disable monitoring of space usage for all disks metrics (available, used, reserved for root). | yes | no |\n| inodes usage for all disks | Enable or disable monitoring of inodes usage for all disks metrics (available, used, reserved for root). | yes | no |\n| bandwidth | Enable or disable monitoring of bandwidth metrics (received, sent). | yes | no |\n| system uptime | Enable or disable monitoring of system uptime metrics (uptime). | yes | no |\n| cpu utilization | Enable or disable monitoring of CPU utilization metrics (user, nice, system, idle). | yes | no |\n| system ram | Enable or disable monitoring of system RAM metrics (Active, Wired, throttled, compressor, inactive, purgeable, speculative, free). | yes | no |\n| swap i/o | Enable or disable monitoring of SWAP I/O metrics (I/O Swap). | yes | no |\n| memory page faults | Enable or disable monitoring of memory page faults metrics (memory, cow, I/O page, compress, decompress, zero fill, reactivate, purge). | yes | no |\n| disk i/o | Enable or disable monitoring of disk I/O metrics (In, Out). | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\n\n###### Disable swap monitoring\n\nA basic example that disables swap monitoring.\n\n{% details open=true summary="Config" %}\n```yaml\n[plugin:macos:sysctl]\n  system swap = no\n[plugin:macos:mach_smi]\n  swap i/o = no\n\n```\n{% /details %}\n###### Disable the complete Mach SMI section\n\nA basic example that disables the entire Mach SMI section.\n\n{% details open=true summary="Config" %}\n```yaml\n[plugin:macos:mach_smi]\n  cpu utilization = no\n  system ram = no\n  swap i/o = no\n  memory page faults = no\n  disk i/o = no\n\n```\n{% /details %}\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per macOS instance\n\nThese metrics refer to hardware and network monitoring.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | user, nice, system, idle | percentage |\n| system.ram | active, wired, throttled, compressor, inactive, purgeable, speculative, free | MiB |\n| mem.swapio | io, out | KiB/s |\n| mem.pgfaults | memory, cow, pagein, pageout, compress, decompress, zero_fill, reactivate, purge | faults/s |\n| system.load | load1, load5, load15 | load |\n| mem.swap | free, used | MiB |\n| system.ipv4 | received, sent | kilobits/s |\n| ipv4.tcppackets | received, sent | packets/s |\n| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s |\n| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout | connections/s |\n| ipv4.tcpofo | inqueue | packets/s |\n| ipv4.tcpsyncookies | received, sent, failed | packets/s |\n| ipv4.ecnpkts | CEP, NoECTP | packets/s |\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | RcvbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s |\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s |\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n| system.uptime | uptime | seconds |\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | read, writes | KiB/s |\n| disk.ops | read, writes | operations/s |\n| disk.util | utilization | % of time working |\n| disk.iotime | reads, writes | milliseconds/s |\n| disk.await | reads, writes | milliseconds/operation |\n| disk.avgsz | reads, writes | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n\n### Per mount point\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n### Per network device\n\n\n\nThis scope has no 
labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound | drops/s |\n| net.events | frames, collisions, carrier | events/s |\n\n",integration_type:"collector",id:"macos.plugin-mach_smi-macOS",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/macos.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-network-viewer.plugin",plugin_name:"network-viewer.plugin",module_name:"network-viewer.plugin",monitored_instance:{name:"Network Connections",link:"",categories:["data-collection.networking"],icon_filename:"network.svg"},alternative_monitored_instances:[],keywords:["network","connections","sockets","tcp","udp","ports"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# Network Connections\n\nPlugin: network-viewer.plugin\nModule: network-viewer.plugin\n\n## Overview\n\n\n\nThis plugin reads the system's socket tables to enumerate all active network connections,\nincluding TCP and UDP sockets in all states, for both IPv4 and IPv6.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin automatically detects all active network connections on the system.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\n\n",functions:"## Functions\n\nThis plugin exposes a real-time function for viewing active network connections.\n\n\n### Network Connections\n\nShows active network connections with protocol details, states, addresses, ports, and performance metrics.\n\nProvides both aggregated and detailed views of TCP and UDP connections for IPv4 and IPv6,\nincluding connection direction (listen, inbound, outbound, local), process information,\nand TCP performance metrics (RTT, retransmissions).\n\nConnections are classified as system or container based on network namespace.\n\n\n| Aspect | Description |\n|:-------|:------------|\n| Name | `Network-viewer.plugin:network-connections` |\n| Require Cloud | no |\n| Performance |  |\n| Security |  |\n| Availability |  |\n\n#### Prerequisites\n\nNo additional configuration is required.\n\n#### Parameters\n\nThis function has no parameters.\n\n#### Returns\n\n\n\n| Column | Type | Unit | Visibility | Description 
|\n|:-------|:-----|:-----|:-----------|:------------|\n\n",integration_type:"collector",id:"network-viewer.plugin-network-viewer.plugin-Network_Connections",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/network-viewer.plugin/metadata.yaml",related_resources:""},{meta:{plugin_name:"nfacct.plugin",module_name:"nfacct.plugin",monitored_instance:{name:"Netfilter",link:"https://www.netfilter.org/",categories:["data-collection.networking"],icon_filename:"netfilter.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# Netfilter\n\nPlugin: nfacct.plugin\nModule: nfacct.plugin\n\n## Overview\n\nMonitor Netfilter metrics for optimal packet filtering and manipulation. Keep tabs on packet counts, dropped packets, and error rates to secure network operations.\n\nNetdata uses libmnl (https://www.netfilter.org/projects/libmnl/index.html) to collect information.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis plugin uses a socket to connect with Netfilter to collect data.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Install required packages\n\nInstall `libmnl-dev` and `libnetfilter-acct-dev` using the package manager of your system.\n\n\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector |  | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:nfacct]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Netfilter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.netlink_new | new, ignore, invalid | connections/s |\n| netfilter.netlink_changes | insert, delete, delete_list | changes/s |\n| netfilter.netlink_search | searched, search_restart, found | searches/s |\n| netfilter.netlink_errors | icmp_error, insert_failed, drop, early_drop | events/s |\n| netfilter.netlink_expect | created, deleted, new | expectations/s |\n| netfilter.nfacct_packets | a dimension per nfacct object | packets/s |\n| netfilter.nfacct_bytes | a dimension per nfacct object | kilobytes/s |\n\n",integration_type:"collector",id:"nfacct.plugin-nfacct.plugin-Netfilter",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/nfacct.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-otel.plugin-otel",plugin_name:"otel.plugin",module_name:"otel",monitored_instance:{name:"OpenTelemetry",link:"https://opentelemetry.io/",categories:["data-collection.cloud-and-devops"],icon_filename:"opentelemetry.svg"},alternative_monitored_instances:[],keywords:["opentelemetry","otel","otlp","grpc","metrics","logs","observability"],related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""}},overview:"# OpenTelemetry\n\nPlugin: otel.plugin\nModule: otel\n\n## Overview\n\nThis plugin enables the Netdata Agent to receive OpenTelemetry metrics and logs\nvia the OTLP/gRPC protocol from any compatible source \u2014 collectors, SDKs, or\ninstrumented applications.\n\nMetrics are automatically visualized as Netdata charts with full alerting support.\nLogs are stored in systemd-compatible journal files and can be explored through\nthe Netdata Logs tab.\n\n\nThe plugin listens on a configurable gRPC endpoint for incoming OTLP data.\n\nIncoming metrics are mapped to Netdata charts using YAML mapping rules placed in the\nchart configs directory (default `/etc/netdata/otel.d/v1/metrics/`). Each file can\ncontain entries that match metrics by instrumentation scope and metric name, and control\nhow data point attributes translate to chart instances and dimensions. Per-metric\noverrides for the collection interval and grace period are also supported. Without a\nmatching rule, the plugin creates charts using default settings. 
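For instance, a minimal mapping entry (the metric and scope names here are illustrative) using the options described below might look like:\n\n```yaml\nmetrics:\n  my.app.request.count:\n    - instrumentation_scope:\n        name: ^my-app-scope$\n      dimension_attribute_key: status\n```\n\n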
Charts with no incoming\ndata are automatically expired and removed.\n\n| Mapping file option | Description |\n|:--------------------|:------------|\n| `instrumentation_scope.name` | Regex to match the instrumentation scope name |\n| `instrumentation_scope.version` | Regex to match the instrumentation scope version |\n| `dimension_attribute_key` | Data point attribute whose value becomes the dimension name |\n| `interval_secs` | Per-metric collection interval override (1\u20133600 seconds) |\n| `grace_period_secs` | Per-metric grace period override |\n\nIncoming logs are written to journal files with configurable rotation and retention\npolicies.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin starts automatically and listens on `127.0.0.1:4317` for incoming OTLP/gRPC connections.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### OpenTelemetry data source\n\nAn OpenTelemetry Collector, SDK, or instrumented application configured to send OTLP data\nto the Netdata agent\'s gRPC endpoint.\n\n\n\n### Configuration\n\n#### Options\n\nThe plugin is configured via `otel.yaml` in the Netdata configuration directory.\nOnly the fields you want to change need to be specified.\n\nAny option can also be overridden via environment variables with the `NETDATA_OTEL_`\nprefix (highest priority). The variable name is the config option in all caps with\ndots replaced by underscores \u2014 e.g. `endpoint.tls_cert_path` becomes\n`NETDATA_OTEL_ENDPOINT_TLS_CERT_PATH`.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| endpoint.path | gRPC endpoint to listen on for incoming OTLP data. | 127.0.0.1:4317 | no |\n| endpoint.tls_cert_path | Path to TLS certificate file. Enables TLS when provided. |  | no |\n| endpoint.tls_key_path | Path to TLS private key file. Required when TLS certificate is provided. |  | no |\n| endpoint.tls_ca_cert_path | Path to TLS CA certificate file for client authentication. |  | no |\n| [metrics.chart_configs_dir](#option-metrics-chart-configs-dir) | Directory containing metric mapping YAML files. | /etc/netdata/otel.d/v1/metrics/ | no |\n| metrics.interval_secs | Collection interval in seconds (1\u20133600). Defines the Netdata chart update frequency. | 10 | no |\n| metrics.grace_period_secs | Grace period in seconds. After the last data point, the plugin waits this long before gap-filling. | 60 | no |\n| metrics.expiry_duration_secs | Expiry duration in seconds. Charts with no data for this long are removed. | 900 | no |\n| [metrics.max_new_charts_per_request](#option-metrics-max-new-charts-per-request) | Maximum new charts created per gRPC request. | 100 | no |\n| logs.journal_dir | Directory to store journal files for ingested logs. |  | yes |\n| logs.size_of_journal_file | Maximum file size before rotating to a new journal file. | 100MB | no |\n| logs.entries_of_journal_file | Maximum log entries per journal file. | 50000 | no |\n| logs.duration_of_journal_file | Maximum time span within a single journal file. 
| 2 hours | no |\n| logs.number_of_journal_files | Maximum number of journal files to keep. | 10 | no |\n| logs.size_of_journal_files | Maximum total size of all journal files. | 1GB | no |\n| logs.duration_of_journal_files | Maximum age of journal files. | 7 days | no |\n| [logs.store_otlp_json](#option-logs-store-otlp-json) | Store the complete OTLP JSON in each log entry. | no | no |\n\n<a id="option-metrics-chart-configs-dir"></a>\n##### metrics.chart_configs_dir\n\nEach file defines how OTLP metrics are mapped to Netdata charts.\nFiles can match metrics by instrumentation scope and name, set the\ndimension attribute key, and override timing parameters. The plugin\nships stock mappings; user files in this directory take priority.\n\n\n<a id="option-metrics-max-new-charts-per-request"></a>\n##### metrics.max_new_charts_per_request\n\nLimits cardinality explosion from high-cardinality label combinations.\n\n\n<a id="option-logs-store-otlp-json"></a>\n##### logs.store_otlp_json\n\nUseful for debugging and reprocessing, but increases storage usage.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `otel.yaml`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config otel.yaml\n```\n\n##### Examples\n\n###### Basic configuration\n\nListen on default endpoint with default settings.\n\n```yaml\nendpoint:\n  path: "127.0.0.1:4317"\nmetrics:\n  chart_configs_dir: /etc/netdata/otel.d/v1/metrics/\n  interval_secs: 10\n  grace_period_secs: 60\n  expiry_duration_secs: 900\n  max_new_charts_per_request: 100\nlogs:\n  journal_dir: /var/log/netdata/otel-journals\n\n```\n###### Partial user override\n\nOverride only specific fields in the user config. All other settings\nare inherited from the stock config. Unknown fields are ignored for\nforward compatibility.\n\n\n{% details open=true summary="Config" %}\n```yaml\nendpoint:\n  path: "0.0.0.0:4317"\nlogs:\n  number_of_journal_files: 20\n  duration_of_journal_files: "14 days"\n\n```\n{% /details %}\n###### Metric mapping file\n\nPlace YAML files like this in `/etc/netdata/otel.d/v1/metrics/` to control how\nOTLP metrics are mapped to Netdata charts. 
This example maps metrics from the\nOpenTelemetry Collector hostmetrics receiver.\n\n\n{% details open=true summary="Config" %}\n```yaml\nmetrics:\n  "system.network.connections":\n    - instrumentation_scope:\n        name: .*hostmetricsreceiver.*networkscraper$\n      dimension_attribute_key: state\n\n  "system.cpu.utilization":\n    - instrumentation_scope:\n        name: .*hostmetricsreceiver.*cpuscraper$\n      dimension_attribute_key: state\n\n  "system.memory.usage":\n    - instrumentation_scope:\n        name: .*hostmetricsreceiver.*memoryscraper$\n      dimension_attribute_key: state\n      interval_secs: 5\n\n```\n{% /details %}\n###### TLS-enabled configuration\n\nListen with TLS enabled for secure connections.\n\n{% details open=true summary="Config" %}\n```yaml\nendpoint:\n  path: "0.0.0.0:4317"\n  tls_cert_path: /etc/netdata/ssl/cert.pem\n  tls_key_path: /etc/netdata/ssl/key.pem\nmetrics:\n  chart_configs_dir: /etc/netdata/otel.d/v1/metrics/\n  interval_secs: 10\n  grace_period_secs: 60\n  expiry_duration_secs: 900\n  max_new_charts_per_request: 100\nlogs:\n  journal_dir: /var/log/netdata/otel-journals\n\n```\n{% /details %}\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics are dynamically created based on the OpenTelemetry data received.\nThe specific metrics depend on the OTLP sources sending data to the plugin.\n\n",integration_type:"collector",id:"otel.plugin-otel-OpenTelemetry",edit_link:"https://github.com/netdata/netdata/blob/master/src/crates/netdata-otel/otel-plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"perf.plugin",module_name:"perf.plugin",monitored_instance:{name:"CPU performance",link:"https://kernel.org/",categories:["data-collection.operating-systems"],icon_filename:"bolt.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["linux","cpu performance","cpu cache","perf.plugin"]},overview:"# CPU performance\n\nPlugin: perf.plugin\nModule: perf.plugin\n\n## Overview\n\nThis collector monitors CPU performance metrics about cycles, instructions, migrations, cache operations and more.\n\nIt uses the `perf_event_open(2)` syscall to open file descriptors for monitoring perf events.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nIt needs setuid to use the necessary syscall to collect perf events. 
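You can verify that the setuid bit is present (the plugin path may differ on your installation):\n\n```bash\nls -l /usr/libexec/netdata/plugins.d/perf.plugin\n# the owner permissions should include an s flag, e.g. -rwsr-x---\n```\n\n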
Netdata sets this permission at installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Install perf plugin\n\nIf you are [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure the `netdata-plugin-perf` package is installed.\n\n\n#### Enable the perf plugin\n\nThe plugin is disabled by default because the number of PMUs is usually quite limited, and letting Netdata silently contend for them could interfere with other performance monitoring software.\n\nTo enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `netdata.conf` file.\n\n```bash\ncd /etc/netdata   # Replace this path with your Netdata config directory, if different\nsudo ./edit-config netdata.conf\n```\n\nChange the value of the `perf` setting to `yes` in the `[plugins]` section. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/start-stop-restart.md) for your system.\n\n\n\n### Configuration\n\n#### Options\n\nYou can list the available options by running:\n\n```bash\n/usr/libexec/netdata/plugins.d/perf.plugin --help\n```\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Command options that specify charts shown by the plugin. `cycles`, `instructions`, `branch`, `cache`, `bus`, `stalled`, `migrations`, `alignment`, `emulation`, `L1D`, `L1D-prefetch`, `L1I`, `LL`, `DTLB`, `ITLB`, `PBU`. | 1 | yes |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:perf]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\n\n###### All metrics\n\nMonitor all metrics available.\n\n```yaml\n[plugin:perf]\n    command options = all\n\n```\n###### CPU cycles\n\nMonitor CPU cycles.\n\n{% details open=true summary="Config" %}\n```yaml\n[plugin:perf]\n    command options = cycles\n\n```\n{% /details %}\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\nYou can run `perf.plugin` with the debug option enabled to troubleshoot issues with it. The output should give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `perf.plugin` in debug mode:\n\n  ```bash\n  ./perf.plugin 1 all debug\n  ```\n\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CPU performance instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| perf.cpu_cycles | cpu, ref_cpu | cycles/s |\n| perf.instructions | instructions | instructions/s |\n| perf.instructions_per_cycle | ipc | instructions/cycle |\n| perf.branch_instructions | instructions, misses | instructions/s |\n| perf.cache | references, misses | operations/s |\n| perf.bus_cycles | bus | cycles/s |\n| perf.stalled_cycles | frontend, backend | cycles/s |\n| perf.migrations | migrations | migrations |\n| perf.alignment_faults | faults | faults |\n| perf.emulation_faults | faults | faults |\n| perf.l1d_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.l1d_cache_prefetch | prefetches | prefetches/s |\n| perf.l1i_cache | read_access, read_misses | events/s |\n| perf.ll_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.dtlb_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.itlb_cache | read_access, read_misses | events/s |\n| perf.pbu_cache | read_access | events/s |\n\n",integration_type:"collector",id:"perf.plugin-perf.plugin-CPU_performance",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/perf.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/diskstats",monitored_instance:{name:"Disk Statistics",link:"",categories:["data-collection.storage"],icon_filename:"hard-drive.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["disk","disks","io","block 
devices"]},overview:"# Disk Statistics\n\nPlugin: proc.plugin\nModule: /proc/diskstats\n\n## Overview\n\nDetailed statistics for each of your system's disk devices and partitions.\nThe data is reported by the kernel and can be used to monitor disk activity on a Linux system.\n\nGet valuable insight into how your disks are performing and where potential bottlenecks might be.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_disk_backlog ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.backlog | average backlog size of the ${label:device} disk over the last 10 minutes |\n| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Disk Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| device | TBD |\n| mount_point | TBD |\n| device_type | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | reads, writes | KiB/s |\n| disk_ext.io | discards | KiB/s |\n| disk.ops | reads, writes | operations/s |\n| disk_ext.ops | discards, flushes | operations/s |\n| disk.qops | operations | operations |\n| disk.backlog | backlog | milliseconds |\n| disk.busy | busy | milliseconds |\n| disk.util | utilization | % of time working |\n| disk.mops | reads, writes | merged operations/s |\n| disk_ext.mops | discards | merged operations/s |\n| disk.iotime | reads, writes | milliseconds/s |\n| disk_ext.iotime | discards, flushes | milliseconds/s |\n| disk.await | reads, writes | milliseconds/operation |\n| disk_ext.await | discards, flushes | milliseconds/operation |\n| disk.avgsz | reads, writes | KiB/operation |\n| disk_ext.avgsz | discards | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n\n",integration_type:"collector",id:"proc.plugin-/proc/diskstats-Disk_Statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/interrupts",monitored_instance:{name:"Interrupts",link:"",categories:["data-collection.operating-systems"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["interrupts"]},overview:"# Interrupts\n\nPlugin: proc.plugin\nModule: /proc/interrupts\n\n## Overview\n\nMonitors `/proc/interrupts`, a file organized by CPU and then by the type of interrupt.\nThe numbers reported are the counts of the interrupts that have occurred of each type.\n\nAn interrupt is a signal to the processor emitted by hardware or software indicating an event that needs\nimmediate attention. The processor then interrupts its current activities and executes the interrupt handler\nto deal with the event. This is part of the way a computer multitasks and handles concurrent processing.\n\nThe types of interrupts include:\n\n- **I/O interrupts**: These are caused by I/O devices like the keyboard, mouse, printer, etc. For example, when\n  you type something on the keyboard, an interrupt is triggered so the processor can handle the new input.\n\n- **Timer interrupts**: These are generated at regular intervals by the system's timer circuit. It's primarily\n  used to switch the CPU among different tasks.\n\n- **Software interrupts**: These are generated by a program requiring disk I/O operations, or other system resources.\n\n- **Hardware interrupts**: These are caused by hardware conditions such as power failure, overheating, etc.\n\nMonitoring `/proc/interrupts` can be used for:\n\n- **Performance tuning**: If an interrupt is happening very frequently, it could be a sign that a device is not\n  configured correctly, or there is a software bug causing unnecessary interrupts. 
This could lead to system\n  performance degradation.\n\n- **System troubleshooting**: If you're seeing a lot of unexpected interrupts, it could be a sign of a hardware problem.\n\n- **Understanding system behavior**: More generally, keeping an eye on what interrupts are occurring can help you\n  understand what your system is doing. It can provide insights into the system's interaction with hardware,\n  drivers, and other parts of the kernel.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Interrupts instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.interrupts | a dimension per device | interrupts/s |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.interrupts | a dimension per device | interrupts/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/interrupts-Interrupts",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/loadavg",monitored_instance:{name:"System Load Average",link:"",categories:["data-collection.operating-systems"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["load","load average"]},overview:"# System Load Average\n\nPlugin: proc.plugin\nModule: /proc/loadavg\n\n## Overview\n\nThe `/proc/loadavg` file provides information about the system load average.\n\nThe load average is a measure of the amount of computational work that a system performs. It is a\nrepresentation of the average system load over a period of time.\n\nThis file contains three numbers representing the system load averages for the last 1, 5, and 15 minutes,\nrespectively. It also includes the number of currently running processes and the total number of processes.\n\nMonitoring the load average can be used for:\n\n- **System performance**: If the load average is too high, it may indicate that your system is overloaded.\n  On a system with a single CPU, if the load average is 1, it means the single CPU is fully utilized. 
If the\n  load averages are consistently higher than the number of CPUs/cores, it may indicate that your system is\n  overloaded and tasks are waiting for CPU time.\n\n- **Troubleshooting**: If the load average is unexpectedly high, it can be a sign of a problem. This could be\n  due to a runaway process, a software bug, or a hardware issue.\n\n- **Capacity planning**: By monitoring the load average over time, you can understand the trends in your\n  system's workload. This can help with capacity planning and scaling decisions.\n\nRemember that load average not only considers CPU usage, but also includes processes waiting for disk I/O.\nTherefore, high load averages could be due to I/O contention as well as CPU contention.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | number of active CPU cores in the system |\n| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system fifteen-minute load average |\n| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system five-minute load average |\n| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system one-minute load average |\n| [ active_processes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System Load Average instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.load | load1, load5, load15 | load |\n| system.active_processes | active | processes |\n\n",integration_type:"collector",id:"proc.plugin-/proc/loadavg-System_Load_Average",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/mdstat",monitored_instance:{name:"MD RAID",link:"",categories:["data-collection.storage"],icon_filename:"hard-drive.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["raid","mdadm","mdstat","raid"]},overview:"# MD RAID\n\nPlugin: proc.plugin\nModule: /proc/mdstat\n\n## Overview\n\nThis integration monitors the status of MD RAID devices.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ mdstat_last_collected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.disks | number of seconds since the last successful data collection |\n| [ mdstat_disks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.disks | number of devices in the down state for the ${label:device} ${label:raid_level} array. Any number > 0 indicates that the array is degraded. |\n| [ mdstat_mismatch_cnt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.mismatch_cnt | number of unsynchronized blocks for the ${label:device} ${label:raid_level} array |\n| [ mdstat_nonredundant_last_collected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.nonredundant | number of seconds since the last successful data collection |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MD RAID instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| md.health | a dimension per md array | failed disks |\n\n### Per md array\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| device | TBD |\n| raid_level | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| md.disks | inuse, down | disks |\n| md.mismatch_cnt | count | unsynchronized blocks |\n| md.status | check, resync, recovery, reshape | percent |\n| md.expected_time_until_operation_finish | finish_in | seconds |\n| md.operation_speed | speed | KiB/s |\n| md.nonredundant | available | boolean |\n\n",integration_type:"collector",id:"proc.plugin-/proc/mdstat-MD_RAID",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/meminfo",monitored_instance:{name:"Memory Usage",link:"",categories:["data-collection.operating-systems"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["memory","ram","available","committed"]},overview:"# Memory Usage\n\nPlugin: proc.plugin\nModule: /proc/meminfo\n\n## Overview\n\n`/proc/meminfo` provides detailed information about the system's current memory usage. It includes information\nabout different types of memory, RAM, Swap, ZSwap, HugePages, Transparent HugePages (THP), Kernel memory,\nSLAB memory, memory mappings, and more.\n\nMonitoring /proc/meminfo can be useful for:\n\n- **Performance Tuning**: Understanding your system's memory usage can help you make decisions about system\n  tuning and optimization. For example, if your system is frequently low on free memory, it might benefit\n  from more RAM.\n\n- **Troubleshooting**: If your system is experiencing problems, `/proc/meminfo` can provide clues about\n  whether memory usage is a factor. 
For example, if your system is slow and cached swap is high, it could\n  mean that your system is swapping out a lot of memory to disk, which can degrade performance.\n\n- **Capacity Planning**: By monitoring memory usage over time, you can understand trends and make informed\n  decisions about future capacity needs.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | system.ram | system memory utilization |\n| [ ram_available ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |\n| [ used_swap ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swap | swap memory utilization |\n| [ 1hour_memory_hw_corrupted ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.hwcorrupt | amount of memory corrupted due to a hardware failure |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memory Usage instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ram | free, used, cached, buffers | MiB |\n| mem.available | avail | MiB |\n| mem.swap | free, used | MiB |\n| mem.swap_cached | cached | MiB |\n| mem.zswap | in-ram, on-disk | MiB |\n| mem.hwcorrupt | HardwareCorrupted | MiB |\n| mem.commited | Commited_AS | MiB |\n| mem.writeback | Dirty, Writeback, FuseWriteback, NfsWriteback, Bounce | MiB |\n| mem.kernel | Slab, KernelStack, PageTables, VmallocUsed, Percpu | MiB |\n| mem.slab | reclaimable, unreclaimable | MiB |\n| mem.hugepages | free, used, surplus, reserved | MiB |\n| mem.thp | anonymous, shmem | MiB |\n| mem.thp_details | ShmemPmdMapped, FileHugePages, FilePmdMapped | MiB |\n| mem.reclaiming | Active, Inactive, Active(anon), Inactive(anon), Active(file), Inactive(file), Unevictable, Mlocked | MiB |\n| mem.high_low | high_used, low_used, high_free, low_free | MiB |\n| mem.cma | used, free | MiB |\n| mem.directmaps | 4k, 2m, 4m, 1g | MiB |\n\n",integration_type:"collector",id:"proc.plugin-/proc/meminfo-Memory_Usage",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/dev",monitored_instance:{name:"Network interfaces",link:"",categories:["data-collection.networking"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["network interfaces"]},overview:"# Network interfaces\n\nPlugin: proc.plugin\nModule: /proc/net/dev\n\n## Overview\n\nMonitor network interface metrics about bandwidth, state, errors and more.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n| [ 1m_received_traffic_overflow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | average inbound utilization for the network interface ${label:device} over the last minute |\n| [ 1m_sent_traffic_overflow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | average outbound utilization for the network interface ${label:device} over the last minute |\n| [ inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the 
network interface ${label:device} over the last 10 minutes |\n| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ wifi_inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ wifi_outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n| [ 10min_fifo_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.fifo | number of FIFO errors for the network interface ${label:device} in the last 10 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Network interfaces instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.net | received, sent | kilobits/s |\n\n### Per network device\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| interface_type | TBD |\n| device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.speed | speed | kilobits/s |\n| net.duplex | full, half, unknown | state |\n| net.operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| net.carrier | up, down | state |\n| net.mtu | mtu | octets |\n| net.packets | received, sent, multicast | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound, outbound | drops/s |\n| net.fifo | receive, transmit | errors |\n| net.compressed | received, sent | packets/s |\n| net.events | frames, collisions, carrier | events/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/dev-Network_interfaces",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/ip_vs_stats",monitored_instance:{name:"IP Virtual Server",link:"",categories:["data-collection.networking"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ip virtual server"]},overview:"# IP Virtual Server\n\nPlugin: proc.plugin\nModule: /proc/net/ip_vs_stats\n\n## Overview\n\nThis integration monitors IP Virtual Server statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default 
Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IP Virtual Server instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipvs.sockets | connections | connections/s |\n| ipvs.packets | received, sent | packets/s |\n| ipvs.net | received, sent | kilobits/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/ip_vs_stats-IP_Virtual_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/netstat",monitored_instance:{name:"Network statistics",link:"",categories:["data-collection.networking"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ip","udp","udplite","icmp","netstat","snmp"]},overview:"# Network statistics\n\nPlugin: proc.plugin\nModule: /proc/net/netstat\n\n## Overview\n\nThis integration provides metrics from the `netstat`, `snmp` and `snmp6` modules.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_tcp_syn_queue_drops ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_syn_queue | average number of SYN requests dropped due to the full TCP SYN queue over the last minute (SYN cookies were not enabled) |\n| [ 1m_tcp_syn_queue_cookies ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_syn_queue | average number of sent SYN cookies due to the full TCP SYN queue over the last minute |\n| [ 1m_tcp_accept_queue_overflows ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_accept_queue | average number of 
overflows in the TCP accept queue over the last minute |\n| [ 1m_tcp_accept_queue_drops ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_accept_queue | average number of dropped packets in the TCP accept queue over the last minute |\n| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_conn.conf) | ip.tcpsock | TCP connections utilization |\n| [ 1m_ip_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of sent TCP RESETS over the last minute |\n| [ 10s_ip_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ip_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of received TCP RESETS over the last minute |\n| [ 10s_ip_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute |\n| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
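As a quick cross-check of these charts, the raw kernel counters this module parses can be read directly (a minimal sketch, assuming a Linux shell):\n\n```bash\n# extended TCP counters (SYN queue drops, accept queue overflows, ...)\ngrep '^TcpExt:' /proc/net/netstat\n# basic TCP counters (resets, opens, errors)\ngrep '^Tcp:' /proc/net/snmp\n```\n\n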
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Network statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ip | received, sent | kilobits/s |\n| ip.tcpmemorypressures | pressures | events/s |\n| ip.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger, failed | connections/s |\n| ip.tcpreorders | timestamp, sack, fack, reno | packets/s |\n| ip.tcpofo | inqueue, dropped, merged, pruned | packets/s |\n| ip.tcpsyncookies | received, sent, failed | packets/s |\n| ip.tcp_syn_queue | drops, cookies | packets/s |\n| ip.tcp_accept_queue | overflows, drops | packets/s |\n| ip.tcpsock | connections | active connections |\n| ip.tcppackets | received, sent | packets/s |\n| ip.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ip.tcpopens | active, passive | connections/s |\n| ip.tcphandshake | EstabResets, OutRsts, AttemptFails, SynRetrans | events/s |\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InNoRoutes, OutNoRoutes, InHdrErrors, InAddrErrors, InTruncatedPkts, InCsumErrors | packets/s |\n| ipv4.bcast | received, sent | kilobits/s |\n| ipv4.bcastpkts | received, sent | packets/s |\n| ipv4.mcast | received, sent | kilobits/s |\n| ipv4.mcastpkts | received, sent | packets/s |\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InDestUnreachs, OutDestUnreachs, InRedirects, OutRedirects, InEchos, OutEchos, InRouterAdvert, OutRouterAdvert, InRouterSelect, OutRouterSelect, InTimeExcds, OutTimeExcds, InParmProbs, OutParmProbs, InTimestamps, OutTimestamps, InTimestampReps, OutTimestampReps | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv4.udplite | received, sent | packets/s |\n| ipv4.udplite_errors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | packets/s |\n| ipv4.ecnpkts | CEP, NoECTP, ECTP0, ECTP1 | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| system.ipv6 | received, sent | kilobits/s |\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InUnknownProtos, InTooBigErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n| ipv6.bcast | received, sent | kilobits/s |\n| ipv6.mcast | received, sent | kilobits/s |\n| ipv6.mcastpkts | received, sent | packets/s |\n| ipv6.udppackets | received, sent | packets/s |\n| ipv6.udperrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv6.udplitepackets | received, sent | packets/s |\n| ipv6.udpliteerrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors | events/s |\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutPktTooBigs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.groupmemb | InQueries, OutQueries, InResponses, OutResponses, InReductions, OutReductions | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, 
InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpmldv2 | received, sent | reports/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n| ipv6.ect | InNoECTPkts, InECT1Pkts, InECT0Pkts, InCEPkts | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/netstat-Network_statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/rpc/nfs",monitored_instance:{name:"NFS Client",link:"",categories:["data-collection.storage"],icon_filename:"nfs.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["nfs client","filesystem"]},overview:"# NFS Client\n\nPlugin: proc.plugin\nModule: /proc/net/rpc/nfs\n\n## Overview\n\nThis integration provides statistics from the Linux kernel's NFS Client.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
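To inspect the source counters directly (a minimal check, on a host with the NFS client kernel module loaded):\n\n```bash\n# the 'rpc' line carries the calls/retransmits/auth_refresh counters charted below\ncat /proc/net/rpc/nfs\n```\n\n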
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NFS Client instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nfs.net | udp, tcp | operations/s |\n| nfs.rpc | calls, retransmits, auth_refresh | calls/s |\n| nfs.proc2 | a dimension per proc2 call | calls/s |\n| nfs.proc3 | a dimension per proc3 call | calls/s |\n| nfs.proc4 | a dimension per proc4 call | calls/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/rpc/nfs-NFS_Client",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/rpc/nfsd",monitored_instance:{name:"NFS Server",link:"",categories:["data-collection.storage"],icon_filename:"nfs.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["nfs server","filesystem"]},overview:"# NFS Server\n\nPlugin: proc.plugin\nModule: /proc/net/rpc/nfsd\n\n## Overview\n\nThis integration provides statistics from the Linux kernel's NFS Server.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
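As with the NFS client, the raw counters can be inspected directly (a minimal sketch; `nfsstat` is an optional tool from nfs-utils):\n\n```bash\ncat /proc/net/rpc/nfsd\n# or, for a formatted server-side view, if nfs-utils is installed:\nnfsstat -s\n```\n\n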
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NFS Server instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nfsd.readcache | hits, misses, nocache | reads/s |\n| nfsd.filehandles | stale | handles/s |\n| nfsd.io | read, write | kilobytes/s |\n| nfsd.threads | threads | threads |\n| nfsd.net | udp, tcp | packets/s |\n| nfsd.rpc | calls, bad_format, bad_auth | calls/s |\n| nfsd.proc2 | a dimension per proc2 call | calls/s |\n| nfsd.proc3 | a dimension per proc3 call | calls/s |\n| nfsd.proc4 | a dimension per proc4 call | calls/s |\n| nfsd.proc4ops | a dimension per proc4 operation | operations/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/rpc/nfsd-NFS_Server",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/sctp/snmp",monitored_instance:{name:"SCTP Statistics",link:"",categories:["data-collection.networking"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["sctp","stream control transmission protocol"]},overview:"# SCTP Statistics\n\nPlugin: proc.plugin\nModule: /proc/net/sctp/snmp\n\n## Overview\n\nThis integration provides statistics about the Stream Control Transmission Protocol (SCTP).\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per SCTP Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sctp.established | established | associations |\n| sctp.transitions | active, passive, aborted, shutdown | transitions/s |\n| sctp.packets | received, sent | packets/s |\n| sctp.packet_errors | invalid, checksum | packets/s |\n| sctp.fragmentation | reassembled, fragmented | packets/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/sctp/snmp-SCTP_Statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/sockstat",monitored_instance:{name:"Socket statistics",link:"",categories:["data-collection.networking"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["sockets"]},overview:"# Socket statistics\n\nPlugin: proc.plugin\nModule: /proc/net/sockstat\n\n## Overview\n\nThis integration provides socket statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ tcp_orphans ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_orphans.conf) | ipv4.sockstat_tcp_sockets | orphan IPv4 TCP sockets utilization |\n| [ tcp_memory ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_mem.conf) | ipv4.sockstat_tcp_mem | TCP memory utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
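For a quick look at the data behind these charts, the source file and the TCP memory limits referenced by the `tcp_memory` alert can be read directly (a minimal sketch):\n\n```bash\n# kernel socket usage summary parsed by this module\ncat /proc/net/sockstat\n# TCP memory limits in pages (min, pressure, max)\ncat /proc/sys/net/ipv4/tcp_mem\n```\n\n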
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Socket statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ip.sockstat_sockets | used | sockets |\n| ipv4.sockstat_tcp_sockets | alloc, orphan, inuse, timewait | sockets |\n| ipv4.sockstat_tcp_mem | mem | KiB |\n| ipv4.sockstat_udp_sockets | inuse | sockets |\n| ipv4.sockstat_udp_mem | mem | sockets |\n| ipv4.sockstat_udplite_sockets | inuse | sockets |\n| ipv4.sockstat_raw_sockets | inuse | sockets |\n| ipv4.sockstat_frag_sockets | inuse | fragments |\n| ipv4.sockstat_frag_mem | mem | KiB |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/sockstat-Socket_statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/sockstat6",monitored_instance:{name:"IPv6 Socket Statistics",link:"",categories:["data-collection.networking"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ipv6 sockets"]},overview:"# IPv6 Socket Statistics\n\nPlugin: proc.plugin\nModule: /proc/net/sockstat6\n\n## Overview\n\nThis integration provides IPv6 socket statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPv6 Socket Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.sockstat6_tcp_sockets | inuse | sockets |\n| ipv6.sockstat6_udp_sockets | inuse | sockets |\n| ipv6.sockstat6_udplite_sockets | inuse | sockets |\n| ipv6.sockstat6_raw_sockets | inuse | sockets |\n| ipv6.sockstat6_frag_sockets | inuse | fragments |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/sockstat6-IPv6_Socket_Statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/softnet_stat",monitored_instance:{name:"Softnet Statistics",link:"",categories:["data-collection.networking"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["softnet"]},overview:"# Softnet Statistics\n\nPlugin: proc.plugin\nModule: /proc/net/softnet_stat\n\n## Overview\n\n`/proc/net/softnet_stat` provides statistics that relate to the handling of network packets by softirq.\n\nIt provides information about:\n\n- Total number of processed packets (`processed`).\n- Packets dropped because the `net.core.netdev_max_backlog` queue was full (`dropped`).\n- Times the softirq handler ran out of `netdev_budget` or time while work remained (`squeezed`).\n- Packets received via Receive Packet Steering (`received_rps`).\n- Times the flow limit was reached (`flow_limit_count`).\n\nMonitoring the `/proc/net/softnet_stat` file can be useful for:\n\n- **Network performance monitoring**: By tracking the total number of processed packets and how many packets\n  were dropped, you can gain insights into your system's network performance.\n\n- **Troubleshooting**: If you're experiencing network-related issues, this collector can provide valuable clues.\n  For instance, a high number of dropped packets may indicate a network problem.\n\n- **Capacity planning**: If your system is consistently processing near its maximum capacity of network\n  packets, it might be time to consider upgrading your network infrastructure.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to exceeded net.core.netdev_max_backlog |\n| [ 1min_netdev_budget_ran_outs 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Softnet Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softnet_stat | processed, dropped, squeezed, received_rps, flow_limit_count | events/s |\n\n### Per cpu core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softnet_stat | processed, dropped, squeezed, received_rps, flow_limit_count | events/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/softnet_stat-Softnet_Statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/stat/nf_conntrack",monitored_instance:{name:"Conntrack",link:"",categories:["data-collection.networking"],icon_filename:"firewall.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["connection tracking mechanism","netfilter","conntrack"]},overview:"# Conntrack\n\nPlugin: proc.plugin\nModule: /proc/net/stat/nf_conntrack\n\n## Overview\n\nThis integration monitors the connection tracking mechanism of Netfilter in the Linux Kernel.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ netfilter_conntrack_full ](https://github.com/netdata/netdata/blob/master/src/health/health.d/netfilter.conf) | netfilter.conntrack_sockets | netfilter connection tracker table size utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
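To relate the `netfilter_conntrack_full` alert to the kernel's limits, compare the current table size with its maximum (a minimal sketch, assuming the nf_conntrack module is loaded):\n\n```bash\n# current number of tracked connections\ncat /proc/sys/net/netfilter/nf_conntrack_count\n# configured table size limit\ncat /proc/sys/net/netfilter/nf_conntrack_max\n```\n\n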
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Conntrack instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.conntrack_sockets | connections | active connections |\n| netfilter.conntrack_new | new, ignore, invalid | connections/s |\n| netfilter.conntrack_changes | inserted, deleted, delete_list | changes/s |\n| netfilter.conntrack_expect | created, deleted, new | expectations/s |\n| netfilter.conntrack_search | searched, restarted, found | searches/s |\n| netfilter.conntrack_errors | icmp_error, error_failed, drop, early_drop | events/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/stat/nf_conntrack-Conntrack",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/stat/synproxy",monitored_instance:{name:"Synproxy",link:"",categories:["data-collection.networking"],icon_filename:"firewall.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["synproxy"]},overview:"# Synproxy\n\nPlugin: proc.plugin\nModule: /proc/net/stat/synproxy\n\n## Overview\n\nThis integration provides statistics about the Synproxy netfilter module.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Synproxy instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.synproxy_syn_received | received | packets/s |\n| netfilter.synproxy_conn_reopened | reopened | connections/s |\n| netfilter.synproxy_cookies | valid, invalid, retransmits | cookies/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/stat/synproxy-Synproxy",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/net/wireless",monitored_instance:{name:"Wireless network interfaces",link:"",categories:["data-collection.networking"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["wireless devices"]},overview:"# Wireless network interfaces\n\nPlugin: proc.plugin\nModule: /proc/net/wireless\n\n## Overview\n\nMonitor wireless devices with metrics about status, link quality, signal level, noise level and more.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per wireless device\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireless.status | status | status |\n| wireless.link_quality | link_quality | value |\n| wireless.signal_level | signal_level | dBm |\n| wireless.noise_level | noise_level | dBm |\n| wireless.discarded_packets | nwid, crypt, frag, retry, misc | packets/s |\n| wireless.missed_beacons | missed_beacons | frames/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/net/wireless-Wireless_network_interfaces",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/pagetypeinfo",monitored_instance:{name:"Page types",link:"",categories:["data-collection.operating-systems"],icon_filename:"microchip.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["memory page types"]},overview:"# Page types\n\nPlugin: proc.plugin\nModule: /proc/pagetypeinfo\n\n## Overview\n\nThis integration provides metrics about the system's memory page types\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Page types instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pagetype_global | a dimension per pagesize | B |\n\n### Per node, zone, type\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node_id | TBD |\n| node_zone | TBD |\n| node_type | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pagetype | a dimension per pagesize | B |\n\n",integration_type:"collector",id:"proc.plugin-/proc/pagetypeinfo-Page_types",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/pressure",monitored_instance:{name:"Pressure Stall Information",link:"",categories:["data-collection.operating-systems"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["pressure"]},overview:"# Pressure Stall Information\n\nPlugin: proc.plugin\nModule: /proc/pressure\n\n## Overview\n\nIntroduced in Linux kernel 4.20, `/proc/pressure` provides information about system pressure stall information\n(PSI). 
PSI is a feature that tracks the amount of time tasks are stalled due to\nresource contention, such as CPU, memory, or I/O.\n\nThe collector monitors four separate files, one per resource:\n\n- **cpu**: Tracks the amount of time tasks are stalled due to CPU contention.\n- **memory**: Tracks the amount of time tasks are stalled due to memory contention.\n- **io**: Tracks the amount of time tasks are stalled due to I/O contention.\n- **irq**: Tracks the amount of time tasks are stalled due to IRQ contention.\n\nEach of them provides stall-time averages over the last 10 seconds, 1 minute, and 5 minutes, plus a cumulative total.\n\nMonitoring the `/proc/pressure` files can provide important insights into system performance and capacity planning:\n\n- **Identifying resource contention**: If these metrics are consistently high, it indicates that tasks are\n  frequently being stalled due to lack of resources, which can significantly degrade system performance.\n\n- **Troubleshooting performance issues**: If a system is experiencing performance issues, these metrics can\n  help identify whether resource contention is the cause.\n\n- **Capacity planning**: By monitoring these metrics over time, you can understand trends in resource\n  utilization and make informed decisions about when to add more resources to your system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
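The pressure files themselves share a simple text format; a sample read (values illustrative, on a PSI-enabled kernel):\n\n```bash\ncat /proc/pressure/cpu\n# some avg10=0.00 avg60=0.11 avg300=0.06 total=1234567\n```\n\nThe `avg` fields are the 10-second, 1-minute and 5-minute percentages charted below; `total` is cumulative stall time in microseconds.\n\n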
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pressure Stall Information instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu_some_pressure | some10, some60, some300 | percentage |\n| system.cpu_some_pressure_stall_time | time | ms |\n| system.cpu_full_pressure | some10, some60, some300 | percentage |\n| system.cpu_full_pressure_stall_time | time | ms |\n| system.memory_some_pressure | some10, some60, some300 | percentage |\n| system.memory_some_pressure_stall_time | time | ms |\n| system.memory_full_pressure | some10, some60, some300 | percentage |\n| system.memory_full_pressure_stall_time | time | ms |\n| system.io_some_pressure | some10, some60, some300 | percentage |\n| system.io_some_pressure_stall_time | time | ms |\n| system.io_full_pressure | some10, some60, some300 | percentage |\n| system.io_full_pressure_stall_time | time | ms |\n\n",integration_type:"collector",id:"proc.plugin-/proc/pressure-Pressure_Stall_Information",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/softirqs",monitored_instance:{name:"SoftIRQ statistics",link:"",categories:["data-collection.operating-systems"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["softirqs","interrupts"]},overview:"# SoftIRQ statistics\n\nPlugin: proc.plugin\nModule: /proc/softirqs\n\n## Overview\n\nIn the Linux kernel, handling of hardware interrupts is split into two halves: the top half and the bottom half.\nThe top half is the routine that responds immediately to an interrupt, while the bottom half is deferred to be processed later.\n\nSoftirqs are a mechanism in the Linux kernel used to handle the bottom halves of interrupts, which can be\ndeferred and processed later in a context where it's safe to enable interrupts.\n\nThe actual work of handling the interrupt is offloaded to a softirq and executed later when the system\ndecides it's a good time to process them. This helps to keep the system responsive by not blocking the top\nhalf for too long, which could lead to missed interrupts.\n\nMonitoring `/proc/softirqs` is useful for:\n\n- **Performance tuning**: A high rate of softirqs could indicate a performance issue. For instance, a high\n  rate of network softirqs (`NET_RX` and `NET_TX`) could indicate a network performance issue.\n\n- **Troubleshooting**: If a system is behaving unexpectedly, checking the softirqs could provide clues about\n  what is going on. 
For example, a sudden increase in block device softirqs (BLOCK) might indicate a problem\n  with a disk.\n\n- **Understanding system behavior**: Knowing what types of softirqs are happening can help you understand what\n  your system is doing, particularly in terms of how it's interacting with hardware and how it's handling\n  interrupts.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per SoftIRQ statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softirqs | a dimension per softirq | softirqs/s |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softirqs | a dimension per softirq | softirqs/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/softirqs-SoftIRQ_statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/spl/kstat/zfs/arcstats",monitored_instance:{name:"ZFS Adaptive Replacement Cache",link:"",categories:["data-collection.storage"],icon_filename:"filesystem.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["zfs arc","arc","zfs","filesystem"]},overview:"# ZFS Adaptive Replacement Cache\n\nPlugin: proc.plugin\nModule: /proc/spl/kstat/zfs/arcstats\n\n## Overview\n\nThis integration monitors ZFS Adaptive Replacement Cache (ARC) statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | 
On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ZFS Adaptive Replacement Cache instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfs.arc_size | arcsz, target, min, max | MiB |\n| zfs.l2_size | actual, size | MiB |\n| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s |\n| zfs.bytes | read, write | KiB/s |\n| zfs.hits | hits, misses | percentage |\n| zfs.hits_rate | hits, misses | events/s |\n| zfs.dhits | hits, misses | percentage |\n| zfs.dhits_rate | hits, misses | events/s |\n| zfs.phits | hits, misses | percentage |\n| zfs.phits_rate | hits, misses | events/s |\n| zfs.mhits | hits, misses | percentage |\n| zfs.mhits_rate | hits, misses | events/s |\n| zfs.l2hits | hits, misses | percentage |\n| zfs.l2hits_rate | hits, misses | events/s |\n| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s |\n| zfs.arc_size_breakdown | recent, frequent | percentage |\n| zfs.memory_ops | direct, throttled, indirect | operations/s |\n| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s |\n| zfs.actual_hits | hits, misses | percentage |\n| zfs.actual_hits_rate | hits, misses | events/s |\n| zfs.demand_data_hits | hits, misses | percentage |\n| zfs.demand_data_hits_rate | hits, misses | events/s |\n| zfs.prefetch_data_hits | hits, misses | percentage |\n| zfs.prefetch_data_hits_rate | hits, misses | events/s |\n| zfs.hash_elements | current, max | elements |\n| zfs.hash_chains | current, max | chains |\n\n",integration_type:"collector",id:"proc.plugin-/proc/spl/kstat/zfs/arcstats-ZFS_Adaptive_Replacement_Cache",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/stat",monitored_instance:{name:"System statistics",link:"",categories:["data-collection.operating-systems"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["cpu utilization","process counts"]},overview:"# System statistics\n\nPlugin: proc.plugin\nModule: /proc/stat\n\n## Overview\n\nCPU utilization, states and frequencies and key Linux system performance metrics.\n\nThe `/proc/stat` file provides various types of system statistics:\n\n- The overall system CPU usage statistics\n- Per CPU core statistics\n- The total context switching of the system\n- The total number of processes running\n- The total CPU interrupts\n- The total CPU softirqs\n\nThe collector also reads:\n\n- `/proc/schedstat` for statistics about the process scheduler in the Linux kernel.\n- `/sys/devices/system/cpu/[X]/thermal_throttle/core_throttle_count` to get the count of thermal throttling events for a specific CPU core on Linux systems.\n- `/sys/devices/system/cpu/[X]/thermal_throttle/package_throttle_count` to get the count of thermal throttling events for a specific CPU package on a Linux system.\n- `/sys/devices/system/cpu/[X]/cpufreq/scaling_cur_freq` to get the current operating frequency of a specific CPU core.\n- 
`/sys/devices/system/cpu/[X]/cpufreq/stats/time_in_state` to get the amount of time the CPU has spent in each of its available frequency states.\n- `/sys/devices/system/cpu/[X]/cpuidle/state[X]/name` to get the names of the idle states for each CPU core in a Linux system.\n- `/sys/devices/system/cpu/[X]/cpuidle/state[X]/time` to get the total time each specific CPU core has spent in each idle state since the system was started.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector auto-detects all metrics. No configuration is needed.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe collector disables cpu frequency and idle state monitoring when there are more than 128 CPU cores available.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| per cpu core utilization | Collects CPU usage metrics for each individual core, in addition to the system-wide averages. | no | no |\n| cpu idle states | Collects CPU idle state residency metrics for each individual core, showing how much time each core spends in different idle states (C-states). | no | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `plugin:proc:/proc/stat` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |\n| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |\n| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
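For example, enabling the two per-core options listed in the Setup section would look like this in `netdata.conf` (a sketch; section and option names as documented above):\n\n```ini\n[plugin:proc:/proc/stat]\n    per cpu core utilization = yes\n    cpu idle states = yes\n```\n\n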
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |\n| system.intr | interrupts | interrupts/s |\n| system.ctxt | switches | context switches/s |\n| system.forks | started | processes/s |\n| system.processes | running, blocked | processes |\n| cpu.core_throttling | a dimension per cpu core | events/s |\n| cpu.package_throttling | a dimension per package | events/s |\n| cpu.cpufreq | a dimension per cpu core | MHz |\n\n### Per cpu core\n\nPer-core CPU metrics. Disabled by default, can be enabled in the [configuration options](#configuration).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cpu | Identifier of the CPU core (e.g., core0, core1, core2). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |\n| cpuidle.cpu_cstate_residency_time | a dimension per c-state | percentage |\n\n",integration_type:"collector",id:"proc.plugin-/proc/stat-System_statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/sys/kernel/random/entropy_avail",monitored_instance:{name:"Entropy",link:"",categories:["data-collection.operating-systems"],icon_filename:"syslog.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["entropy"]},overview:"# Entropy\n\nPlugin: proc.plugin\nModule: /proc/sys/kernel/random/entropy_avail\n\n## Overview\n\nEntropy, a measure of the randomness or unpredictability of data.\n\nIn the context of cryptography, entropy is used to generate random numbers or keys that are essential for\nsecure communication and encryption. Without a good source of entropy, cryptographic protocols can become\nvulnerable to attacks that exploit the predictability of the generated keys.\n\nIn most operating systems, entropy is generated by collecting random events from various sources, such as\nhardware interrupts, mouse movements, keyboard presses, and disk activity. These events are fed into a pool\nof entropy, which is then used to generate random numbers when needed.\n\nThe `/dev/random` device in Linux is one such source of entropy, and it provides an interface for programs\nto access the pool of entropy. When a program requests random numbers, it reads from the `/dev/random` device,\nwhich blocks until enough entropy is available to generate the requested numbers. This ensures that the\ngenerated numbers are truly random and not predictable. \n\nHowever, if the pool of entropy gets depleted, the `/dev/random` device may block indefinitely, causing\nprograms that rely on random numbers to slow down or even freeze. This is especially problematic for\ncryptographic protocols that require a continuous stream of random numbers, such as SSL/TLS and SSH.\n\nTo avoid this issue, some systems use a hardware random number generator (RNG) to generate high-quality\nentropy. A hardware RNG generates random numbers by measuring physical phenomena, such as thermal noise or\nradioactive decay. 
These sources of randomness are considered to be more reliable and unpredictable than\nsoftware-based sources.\n\nOne such hardware RNG is the Trusted Platform Module (TPM), which is a dedicated hardware chip that is used\nfor cryptographic operations and secure boot. The TPM contains a built-in hardware RNG that generates\nhigh-quality entropy, which can be used to seed the pool of entropy in the operating system.\n\nAlternatively, software-based solutions such as `Haveged` can be used to generate additional entropy by\nexploiting sources of randomness in the system, such as CPU utilization and network traffic. These solutions\ncan help to mitigate the risk of entropy depletion, but they may not be as reliable as hardware-based solutions.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ lowest_entropy ](https://github.com/netdata/netdata/blob/master/src/health/health.d/entropy.conf) | system.entropy | minimum number of bits of entropy available for the kernel\u2019s random number generator |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
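A minimal way to read the value this module charts (assuming a Linux shell):\n\n```bash\n# bits of entropy currently available to the kernel random number generator\ncat /proc/sys/kernel/random/entropy_avail\n```\n\n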
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Entropy instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.entropy | entropy | entropy |\n\n",integration_type:"collector",id:"proc.plugin-/proc/sys/kernel/random/entropy_avail-Entropy",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/uptime",monitored_instance:{name:"System Uptime",link:"",categories:["data-collection.operating-systems"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["uptime"]},overview:"# System Uptime\n\nPlugin: proc.plugin\nModule: /proc/uptime\n\n## Overview\n\nThe amount of time the system has been up (running).\n\nUptime is a critical aspect of overall system performance:\n\n- **Availability**: Uptime monitoring can show whether a server is consistently available or experiences frequent downtimes.\n- **Performance Monitoring**: While server uptime alone doesn't provide detailed performance data, analyzing the duration and frequency of downtimes can help identify patterns or trends.\n- **Proactive problem detection**: If server uptime monitoring reveals unexpected downtimes or a decreasing uptime trend, it can serve as an early warning sign of potential problems.\n- **Root cause analysis**: When investigating server downtime, the uptime metric alone may not provide enough information to pinpoint the exact cause.\n- **Load balancing**: Uptime data can indirectly indicate load balancing issues if certain servers have significantly lower uptimes than others.\n- **Optimize maintenance efforts**: Servers with consistently low uptimes or frequent downtimes may require more attention.\n- **Compliance requirements**: Server uptime data can be used to demonstrate compliance with regulatory requirements or SLAs that mandate a minimum level of server availability.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
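The underlying file holds seconds since boot (first field) and cumulative idle time; a minimal formatted read:\n\n```bash\n# convert seconds since boot to days\nawk '{print $1/86400}' /proc/uptime\n```\n\n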
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System Uptime instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.uptime | uptime | seconds |\n\n",integration_type:"collector",id:"proc.plugin-/proc/uptime-System_Uptime",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/proc/vmstat",monitored_instance:{name:"Memory Statistics",link:"",categories:["data-collection.operating-systems"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["swap","page faults","oom","numa"]},overview:"# Memory Statistics\n\nPlugin: proc.plugin\nModule: /proc/vmstat\n\n## Overview\n\nLinux Virtual memory subsystem.\n\nInformation about memory management, indicating how effectively the kernel allocates and frees\nmemory resources in response to system demands.\n\nMonitors page faults, which occur when a process requests a portion of its memory that isn't\nimmediately available. Monitoring these events can help diagnose inefficiencies in memory management and\nprovide insights into application behavior.\n\nTracks swapping activity \u2014 a vital aspect of memory management where the kernel moves data from RAM to\nswap space, and vice versa, based on memory demand and usage. It also monitors the utilization of zswap,\na compressed cache for swap pages, and provides insights into its usage and performance implications.\n\nIn the context of virtualized environments, it tracks the ballooning mechanism which is used to balance\nmemory resources between host and guest systems.\n\nFor systems using NUMA architecture, it provides insights into the local and remote memory accesses, which\ncan impact the performance based on the memory access times.\n\nThe collector also watches for 'Out of Memory' kills, a drastic measure taken by the system when it runs out\nof memory resources.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes |\n| [ oom_kill ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.oom_kill | number of out of memory kills in the last 30 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
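The swap and page-fault counters behind these charts can be read straight from the source file (a minimal check):\n\n```bash\n# pages swapped in/out and fault counters parsed by this module\ngrep -E '^(pswpin|pswpout|pgfault|pgmajfault)' /proc/vmstat\n```\n\n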
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memory Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapio | in, out | KiB/s |\n| system.pgpgio | in, out | KiB/s |\n| system.pgfaults | minor, major | faults/s |\n| mem.balloon | inflate, deflate, migrate | KiB/s |\n| mem.zswapio | in, out | KiB/s |\n| mem.ksm_cow | swapin, write | KiB/s |\n| mem.thp_faults | alloc, fallback, fallback_charge | events/s |\n| mem.thp_file | alloc, fallback, mapped, fallback_charge | events/s |\n| mem.thp_zero | alloc, failed | events/s |\n| mem.thp_collapse | alloc, failed | events/s |\n| mem.thp_split | split, failed, split_pmd, split_deferred | events/s |\n| mem.thp_swapout | swapout, fallback | events/s |\n| mem.thp_compact | success, fail, stall | events/s |\n| mem.oom_kill | kills | kills/s |\n| mem.numa | local, foreign, interleave, other, pte_updates, huge_pte_updates, hint_faults, hint_faults_local, pages_migrated | events/s |\n\n",integration_type:"collector",id:"proc.plugin-/proc/vmstat-Memory_Statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/sys/block/zram",monitored_instance:{name:"ZRAM",link:"",categories:["data-collection.operating-systems"],icon_filename:"microchip.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["zram"]},overview:"# ZRAM\n\nPlugin: proc.plugin\nModule: /sys/block/zram\n\n## Overview\n\nzRAM, or compressed RAM, is a block device that uses a portion of your system's RAM as its backing store.\nThe data written to this block device is compressed and stored in memory.\n\nThe collector provides information about the operation and effectiveness of zRAM on your system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
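\n\nFor illustration only, a minimal Python sketch of how the compression effectiveness of a zram device can be estimated from sysfs. It assumes a device named `zram0` and that the first two columns of `mm_stat` are the original and compressed data sizes in bytes, as documented for the zram driver:\n\n```python\n# Hypothetical example: estimate zram savings from /sys/block/zram0/mm_stat.\nwith open('/sys/block/zram0/mm_stat') as f:\n    fields = [int(v) for v in f.read().split()]\n\noriginal_bytes, compressed_bytes = fields[0], fields[1]\nif compressed_bytes:\n    print(f'compression ratio: {original_bytes / compressed_bytes:.2f}')\n```\n\n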
An instance is uniquely identified by a set of labels.\n\n\n\n### Per zram device\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.zram_usage | compressed, metadata | MiB |\n| mem.zram_savings | savings, original | MiB |\n| mem.zram_ratio | ratio | ratio |\n| mem.zram_efficiency | percent | percentage |\n\n",integration_type:"collector",id:"proc.plugin-/sys/block/zram-ZRAM",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/sys/class/drm",monitored_instance:{name:"AMD GPU",link:"https://www.amd.com",categories:["data-collection.hardware-and-sensors"],icon_filename:"amd.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["amd","gpu","hardware"]},overview:"# AMD GPU\n\nPlugin: proc.plugin\nModule: /sys/class/drm\n\n## Overview\n\nThis integration monitors AMD GPU metrics, such as utilization, clock frequency and memory usage.\n\nIt reads `/sys/class/drm` to collect metrics for every AMD GPU card instance it encounters.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| product_name | GPU product name (e.g. 
AMD RX 6600) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| amdgpu.gpu_utilization | utilization | percentage |\n| amdgpu.gpu_mem_utilization | utilization | percentage |\n| amdgpu.gpu_clk_frequency | frequency | MHz |\n| amdgpu.gpu_mem_clk_frequency | frequency | MHz |\n| amdgpu.gpu_mem_vram_usage_perc | usage | percentage |\n| amdgpu.gpu_mem_vram_usage | free, used | bytes |\n| amdgpu.gpu_mem_vis_vram_usage_perc | usage | percentage |\n| amdgpu.gpu_mem_vis_vram_usage | free, used | bytes |\n| amdgpu.gpu_mem_gtt_usage_perc | usage | percentage |\n| amdgpu.gpu_mem_gtt_usage | free, used | bytes |\n\n",integration_type:"collector",id:"proc.plugin-/sys/class/drm-AMD_GPU",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/sys/class/infiniband",monitored_instance:{name:"InfiniBand",link:"",categories:["data-collection.networking"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["infiniband","rdma"]},overview:"# InfiniBand\n\nPlugin: proc.plugin\nModule: /sys/class/infiniband\n\n## Overview\n\nThis integration monitors InfiniBand network interface statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
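\n\nFor illustration only, a minimal Python sketch that walks the sysfs tree this module reads and prints the raw data counters per port. Paths follow the standard Linux layout; note that `port_rcv_data`/`port_xmit_data` are cumulative and, per the InfiniBand specification, counted in 4-byte units:\n\n```python\n# Hypothetical example: dump per-port data counters from sysfs.\nimport glob\nimport os\n\nfor counter in glob.glob('/sys/class/infiniband/*/ports/*/counters/port_*_data'):\n    with open(counter) as f:\n        value = int(f.read())\n    parts = counter.split(os.sep)  # ['', 'sys', 'class', 'infiniband', dev, 'ports', port, ...]\n    print(parts[4], 'port', parts[6], os.path.basename(counter), '=', value)\n```\n\n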
An instance is uniquely identified by a set of labels.\n\n\n\n### Per infiniband port\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ib.bytes | Received, Sent | kilobits/s |\n| ib.packets | Received, Sent, Mcast_rcvd, Mcast_sent, Ucast_rcvd, Ucast_sent | packets/s |\n| ib.errors | Pkts_malformated, Pkts_rcvd_discarded, Pkts_sent_discarded, Tick_Wait_to_send, Pkts_missed_resource, Buffer_overrun, Link_Downed, Link_recovered, Link_integrity_err, Link_minor_errors, Pkts_rcvd_with_EBP, Pkts_rcvd_discarded_by_switch, Pkts_sent_discarded_by_switch | errors/s |\n| ib.hwerrors | Duplicated_packets, Pkt_Seq_Num_gap, Ack_timer_expired, Drop_missing_buffer, Drop_out_of_sequence, NAK_sequence_rcvd, CQE_err_Req, CQE_err_Resp, CQE_Flushed_err_Req, CQE_Flushed_err_Resp, Remote_access_err_Req, Remote_access_err_Resp, Remote_invalid_req, Local_length_err_Resp, RNR_NAK_Packets, CNP_Pkts_ignored, RoCE_ICRC_Errors | errors/s |\n| ib.hwpackets | RoCEv2_Congestion_sent, RoCEv2_Congestion_rcvd, IB_Congestion_handled, ATOMIC_req_rcvd, Connection_req_rcvd, Read_req_rcvd, Write_req_rcvd, RoCE_retrans_adaptive, RoCE_retrans_timeout, RoCE_slow_restart, RoCE_slow_restart_congestion, RoCE_slow_restart_count | packets/s |\n\n",integration_type:"collector",id:"proc.plugin-/sys/class/infiniband-InfiniBand",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/sys/class/power_supply",monitored_instance:{name:"Power Supply",link:"",categories:["data-collection.hardware-and-sensors"],icon_filename:"powersupply.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["psu","power supply"]},overview:"# Power Supply\n\nPlugin: proc.plugin\nModule: /sys/class/power_supply\n\n## Overview\n\nThis integration monitors power supply metrics, such as battery status, AC power status and more.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ power_supply_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/power_supply_capacity.conf) | powersupply.capacity | percentage of remaining power supply capacity |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
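\n\nFor illustration only, a minimal Python sketch reading the same sysfs attributes this module charts, here the remaining capacity of each power supply device (not every device exposes every attribute, hence the existence check):\n\n```python\n# Hypothetical example: print remaining capacity per power supply device.\nimport glob\nimport os\n\nfor supply in glob.glob('/sys/class/power_supply/*'):\n    capacity_file = os.path.join(supply, 'capacity')\n    if os.path.exists(capacity_file):\n        with open(capacity_file) as f:\n            print(os.path.basename(supply), f.read().strip() + '%')\n```\n\n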
An instance is uniquely identified by a set of labels.\n\n\n\n### Per power device\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powersupply.capacity | capacity | percentage |\n| powersupply.power | power | W |\n| powersupply.charge | empty_design, empty, now, full, full_design | Ah |\n| powersupply.energy | empty_design, empty, now, full, full_design | Wh |\n| powersupply.voltage | min_design, min, now, max, max_design | V |\n\n",integration_type:"collector",id:"proc.plugin-/sys/class/power_supply-Power_Supply",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/sys/devices/system/edac/mc",monitored_instance:{name:"Memory modules (DIMMs)",link:"",categories:["data-collection.hardware-and-sensors"],icon_filename:"microchip.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["edac","ecc","dimm","ram","hardware"]},overview:"# Memory modules (DIMMs)\n\nPlugin: proc.plugin\nModule: /sys/devices/system/edac/mc\n\n## Overview\n\nThe Error Detection and Correction (EDAC) subsystem detects and reports errors in the system's memory,\nprimarily ECC (Error-Correcting Code) memory errors.\n\nThe collector provides data for:\n\n- Per memory controller (MC): correctable and uncorrectable errors. These can be of two kinds:\n  - errors related to a DIMM\n  - errors that cannot be associated with a DIMM\n\n- Per memory DIMM: correctable and uncorrectable errors. There are two kinds:\n  - memory controllers that can identify the physical DIMMs and report errors directly for them,\n  - memory controllers that report errors for memory address ranges that can be linked to DIMMs.\n    In this case the DIMMs reported may be more than the physical DIMMs installed.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ ecc_memory_mc_noinfo_correctable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_errors | memory controller ${label:controller} ECC correctable errors (unknown DIMM slot) |\n| [ ecc_memory_mc_noinfo_uncorrectable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_errors | memory controller ${label:controller} ECC uncorrectable errors (unknown DIMM slot) |\n| [ ecc_memory_dimm_correctable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | 
mem.edac_mc_dimm_errors | DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC correctable errors |\n| [ ecc_memory_dimm_uncorrectable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_dimm_errors | DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC uncorrectable errors |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per memory controller\n\nThese metrics refer to the memory controller.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| controller | [mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller. |\n| mc_name | Memory controller type. |\n| size_mb | The amount of memory in megabytes that this memory controller manages. |\n| max_location | Last available memory slot in this memory controller. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.edac_mc_errors | correctable, uncorrectable, correctable_noinfo, uncorrectable_noinfo | errors |\n\n### Per memory module\n\nThese metrics refer to the memory module (or rank, [depends on the memory controller](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#f5)).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| controller | [mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller. |\n| dimm | [dimmX or rankX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#dimmx-or-rankx-directories) directory name of this memory module. |\n| dimm_dev_type | Type of DRAM device used in this memory module. For example, x1, x2, x4, x8. |\n| dimm_edac_mode | Type of error detection and correction used. For example, S4ECD4ED would mean Chipkill with x4 DRAM. |\n| dimm_label | Label assigned to this memory module. |\n| dimm_location | Location of the memory module. |\n| dimm_mem_type | Type of the memory module. |\n| size | The amount of memory in megabytes that this memory module manages. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.edac_mc_errors | correctable, uncorrectable | errors |\n\n",integration_type:"collector",id:"proc.plugin-/sys/devices/system/edac/mc-Memory_modules_(DIMMs)",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/sys/devices/system/node",monitored_instance:{name:"Non-Uniform Memory Access",link:"",categories:["data-collection.operating-systems"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["numa"]},overview:"# Non-Uniform Memory Access\n\nPlugin: proc.plugin\nModule: /sys/devices/system/node\n\n## Overview\n\nInformation about NUMA (Non-Uniform Memory Access) nodes on the system.\n\nNUMA is a method of configuring a cluster of microprocessors in a multiprocessing system so that they can\nshare memory locally, improving performance and the ability of the system to be expanded. 
NUMA is used in a\nsymmetric multiprocessing (SMP) system.\n\nIn a NUMA system, processors, memory, and I/O devices are grouped together into cells, also known as nodes.\nEach node has its own memory and set of I/O devices, and one or more processors. While a processor can access\nmemory in any of the nodes, it does so faster when accessing memory within its own node.\n\nThe collector provides statistics on memory allocations for processes running on the NUMA nodes, revealing the\nefficiency of memory allocations in multi-node systems.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per numa node\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| numa_node | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.numa_nodes | hit, miss, local, foreign, interleave, other | events/s |\n\n",integration_type:"collector",id:"proc.plugin-/sys/devices/system/node-Non-Uniform_Memory_Access",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/sys/fs/bcache",monitored_instance:{name:"BCache",link:"https://bcache.evilpiepirate.org/",categories:["data-collection.storage"],icon_filename:"hard-drive.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["bcache","cache","ssd","block devices"]},overview:"# BCache\n\nPlugin: proc.plugin\nModule: /sys/fs/bcache\n\n## Overview\n\nStatistics for BCache (block layer cache) devices, including cache hit ratios, I/O operations, cache allocations, and bypass activity.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAutomatically detects BCache devices by reading stats from `/sys/block/*/bcache/`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThe configuration file name for this integration is 
`netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ bcache_cache_dirty ](https://github.com/netdata/netdata/blob/master/src/health/health.d/bcache.conf) | disk.bcache_cache_alloc | percentage of cache space used for dirty data and metadata (this usually means your SSD cache is too small) |\n| [ bcache_cache_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/bcache.conf) | disk.bcache_cache_read_races | number of times data was read from the cache, the bucket was reused and invalidated in the last 10 minutes (when this occurs the data is reread from the backing device) |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| device | TBD |\n| mount_point | TBD |\n| device_type | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.bcache_cache_alloc | ununsed, dirty, clean, metadata, undefined | percentage |\n| disk.bcache_hit_ratio | 5min, 1hour, 1day, ever | percentage |\n| disk.bcache_rates | congested, writeback | KiB/s |\n| disk.bcache_size | dirty | MiB |\n| disk.bcache_usage | avail | percentage |\n| disk.bcache_cache_read_races | races, errors | operations/s |\n| disk.bcache | hits, misses, collisions, readaheads | operations/s |\n| disk.bcache_bypass | hits, misses | operations/s |\n\n",integration_type:"collector",id:"proc.plugin-/sys/fs/bcache-BCache",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/sys/fs/btrfs",monitored_instance:{name:"BTRFS",link:"",categories:["data-collection.storage"],icon_filename:"filesystem.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["btrfs","filesystem"]},overview:"# BTRFS\n\nPlugin: proc.plugin\nModule: /sys/fs/btrfs\n\n## Overview\n\nThis integration provides usage and error statistics from the BTRFS filesystem.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the 
system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ btrfs_allocated ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.disk | percentage of allocated BTRFS physical disk space |\n| [ btrfs_data ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.data | utilization of BTRFS data space |\n| [ btrfs_metadata ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.metadata | utilization of BTRFS metadata space |\n| [ btrfs_system ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.system | utilization of BTRFS system space |\n| [ btrfs_device_read_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS read errors |\n| [ btrfs_device_write_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS write errors |\n| [ btrfs_device_flush_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS flush errors |\n| [ btrfs_device_corruption_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS corruption errors |\n| [ btrfs_device_generation_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS generation errors |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
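\n\nFor illustration only, a minimal Python sketch reading per-filesystem allocation statistics from the sysfs tree this module uses. It assumes the usual `/sys/fs/btrfs/<UUID>/allocation/...` layout of mounted BTRFS filesystems:\n\n```python\n# Hypothetical example: print used vs. total bytes per allocation group.\nimport glob\nimport os\n\nfor fs in glob.glob('/sys/fs/btrfs/*-*'):  # filesystem UUID directories\n    for kind in ('data', 'metadata', 'system'):\n        base = os.path.join(fs, 'allocation', kind)\n        with open(os.path.join(base, 'bytes_used')) as f:\n            used = int(f.read())\n        with open(os.path.join(base, 'total_bytes')) as f:\n            total = int(f.read())\n        print(os.path.basename(fs), kind, used, '/', total, 'bytes')\n```\n\n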
An instance is uniquely identified by a set of labels.\n\n\n\n### Per btrfs filesystem\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| filesystem_uuid | TBD |\n| filesystem_label | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| btrfs.disk | unallocated, data_free, data_used, meta_free, meta_used, sys_free, sys_used | MiB |\n| btrfs.data | free, used | MiB |\n| btrfs.metadata | free, used, reserved | MiB |\n| btrfs.system | free, used | MiB |\n| btrfs.commits | commits | commits |\n| btrfs.commits_perc_time | commits | percentage |\n| btrfs.commit_timings | last, max | ms |\n\n### Per btrfs device\n\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| device_id | TBD |\n| filesystem_uuid | TBD |\n| filesystem_label | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| btrfs.device_errors | write_errs, read_errs, flush_errs, corruption_errs, generation_errs | errors |\n\n",integration_type:"collector",id:"proc.plugin-/sys/fs/btrfs-BTRFS",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"/sys/kernel/mm/ksm",monitored_instance:{name:"Kernel Same-Page Merging",link:"",categories:["data-collection.operating-systems"],icon_filename:"microchip.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ksm","samepage","merging"]},overview:"# Kernel Same-Page Merging\n\nPlugin: proc.plugin\nModule: /sys/kernel/mm/ksm\n\n## Overview\n\nKernel Samepage Merging (KSM) is a memory-saving feature in Linux that enables the kernel to examine the\nmemory of different processes and identify identical pages. It then merges these identical pages into a\nsingle page that the processes share. This is particularly useful for virtualization, where multiple virtual\nmachines might be running the same operating system or applications and have many identical pages.\n\nThe collector provides information about the operation and effectiveness of KSM on your system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
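\n\nFor illustration only, a minimal Python sketch that derives approximate KSM savings from the sysfs counters this module reads. All of these counters are in pages, so they are multiplied by the page size:\n\n```python\n# Hypothetical example: approximate memory saved by KSM.\nimport os\n\ndef ksm_counter(name):\n    with open(f'/sys/kernel/mm/ksm/{name}') as f:\n        return int(f.read())\n\npage_size = os.sysconf('SC_PAGE_SIZE')\nsaved = (ksm_counter('pages_sharing') - ksm_counter('pages_shared')) * page_size\nprint(f'approx. memory saved by KSM: {saved / 2**20:.1f} MiB')\n```\n\n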
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kernel Same-Page Merging instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.ksm | shared, unshared, sharing, volatile | MiB |\n| mem.ksm_savings | savings, offered | MiB |\n| mem.ksm_ratios | savings | percentage |\n\n",integration_type:"collector",id:"proc.plugin-/sys/kernel/mm/ksm-Kernel_Same-Page_Merging",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"proc.plugin",module_name:"ipc",monitored_instance:{name:"Inter Process Communication",link:"",categories:["data-collection.operating-systems"],icon_filename:"network-wired.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ipc","semaphores","shared memory"]},overview:"# Inter Process Communication\n\nPlugin: proc.plugin\nModule: ipc\n\n## Overview\n\nIPC stands for Inter-Process Communication. It is a mechanism which allows processes to communicate with each\nother and synchronize their actions.\n\nThis collector exposes information about:\n\n- Message Queues: This allows messages to be exchanged between processes. It's a more flexible method that\n  allows messages to be placed onto a queue and read at a later time.\n\n- Shared Memory: This method allows for the fastest form of IPC because processes can exchange data by\n  reading/writing into shared memory segments.\n\n- Semaphores: They are used to synchronize the operations performed by independent processes. So, if multiple\n  processes are trying to access a single shared resource, semaphores can ensure that only one process\n  accesses the resource at a given time.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\nThere are no configuration options.\n\n\n\n#### via File\n\nThere is no configuration file.\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization |\n| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
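\n\nFor illustration only, a minimal Python sketch counting SysV semaphores the same way they are exposed under `/proc/sysvipc/` (one header line, then one row per semaphore array; the `nsems` column holds the number of semaphores in each array):\n\n```python\n# Hypothetical example: count semaphore arrays and total semaphores.\nwith open('/proc/sysvipc/sem') as f:\n    header, *rows = f.read().splitlines()\n\nnsems_col = header.split().index('nsems')\narrays = [row.split() for row in rows if row.strip()]\nprint('semaphore arrays:', len(arrays))\nprint('semaphores:', sum(int(row[nsems_col]) for row in arrays))\n```\n\n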
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Inter Process Communication instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_semaphores | semaphores | semaphores |\n| system.ipc_semaphore_arrays | arrays | arrays |\n| system.message_queue_message | a dimension per queue | messages |\n| system.message_queue_bytes | a dimension per queue | bytes |\n| system.shared_memory_segments | segments | segments |\n| system.shared_memory_bytes | bytes | bytes |\n\n",integration_type:"collector",id:"proc.plugin-ipc-Inter_Process_Communication",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"am2320",monitored_instance:{name:"AM2320",link:"https://learn.adafruit.com/adafruit-am2320-temperature-humidity-i2c-sensor/overview",categories:["data-collection.hardware-and-sensors"],icon_filename:"microchip.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["temperature","am2320","sensor","humidity"]},overview:"# AM2320\n\nPlugin: python.d.plugin\nModule: am2320\n\n## Overview\n\nThis collector monitors AM2320 sensor metrics about temperature and humidity.\n\nIt retrieves temperature and humidity values by contacting an AM2320 sensor over i2c.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming prerequisites are met, the collector will try to connect to the sensor via i2c\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\n#### Sensor connection to a Raspberry Pi\n\nConnect the am2320 to the Raspberry Pi I2C pins\n\nRaspberry Pi 3B/4 Pins:\n\n- Board 3.3V (pin 1) to sensor VIN (pin 1)\n- Board SDA (pin 3) to sensor SDA (pin 2)\n- Board GND (pin 6) to sensor GND (pin 3)\n- Board SCL (pin 5) to sensor SCL (pin 4)\n\nYou may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors but it doesn't hurt to add them anyway. You can use 2.2K - 10K but we will just use 10K. The resistors go from VDD to SCL and SDA each.\n\n\n#### Software requirements\n\nInstall the Adafruit Circuit Python AM2320 library:\n\n`sudo pip3 install adafruit-circuitpython-am2320`\n\n\n\n### Configuration\n\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Sets the default data collection frequency. 
| 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. |  | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `python.d/am2320.conf`.\n\nThe file format is YAML. Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\n\njob_name:\n  job_option1: some_value\n  job_option2: some_other_value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/am2320.conf\n```\n\n##### Examples\n\n###### Local sensor\n\nA basic JOB configuration.\n\n```yaml\nlocal_sensor:\n  name: 'Local AM2320'\n\n```\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `am2320` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n  ```bash\n  ./python.d.plugin am2320 debug trace\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `am2320` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep am2320\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep am2320 /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep am2320\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
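\n\nTo complement the setup steps above, a minimal Python sketch of reading the sensor with the Adafruit library named in the prerequisites (pin names assume a Raspberry Pi; this is a standalone test script, not the collector itself):\n\n```python\n# Hypothetical example: read one temperature/humidity sample over I2C.\nimport board\nimport busio\nimport adafruit_am2320\n\ni2c = busio.I2C(board.SCL, board.SDA)\nsensor = adafruit_am2320.AM2320(i2c)\nprint('temperature:', sensor.temperature, 'C')\nprint('humidity:', sensor.relative_humidity, '%')\n```\n\n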
An instance is uniquely identified by a set of labels.\n\n\n\n### Per AM2320 instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| am2320.temperature | temperature | celsius |\n| am2320.humidity | humidity | percentage |\n\n",integration_type:"collector",id:"python.d.plugin-am2320-AM2320",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/am2320/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"go_expvar",monitored_instance:{name:"Go applications (EXPVAR)",link:"https://pkg.go.dev/expvar",categories:["data-collection.applications"],icon_filename:"go.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["go","expvar","application"]},overview:"# Go applications (EXPVAR)\n\nPlugin: python.d.plugin\nModule: go_expvar\n\n## Overview\n\nThis collector monitors Go applications that expose their metrics with the use of the `expvar` package from the Go standard library. It produces charts for Go runtime memory statistics and optionally any number of custom charts.\n\nIt connects via HTTP to gather the metrics exposed via the `expvar` package.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Enable the go_expvar collector\n\nThe `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata   # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\n\nChange the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/start-stop-restart.md) for your system.\n\n\n#### Sample `expvar` usage in a Go application\n\nThe `expvar` package exposes metrics over HTTP and is very easy to use.\nConsider this minimal sample below:\n\n```go\npackage main\n\nimport (\n        _ "expvar"\n        "net/http"\n)\n\nfunc main() {\n        http.ListenAndServe("127.0.0.1:8080", nil)\n}\n```\n\nWhen imported this way, the `expvar` package registers an HTTP handler at `/debug/vars` that\nexposes Go runtime\'s memory statistics in JSON format. 
You can inspect the output by opening\nthe URL in your browser (or by using `wget` or `curl`).\n\nSample output:\n\n```json\n{\n"cmdline": ["./expvar-demo-binary"],\n"memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, <omitted for brevity>}\n}\n```\n\nYou can of course expose and monitor your own variables as well.\nHere is a sample Go application that exposes a few custom variables:\n\n```go\npackage main\n\nimport (\n    "expvar"\n    "net/http"\n    "runtime"\n    "time"\n)\n\nfunc main() {\n\n    tick := time.NewTicker(1 * time.Second)\n    num_go := expvar.NewInt("runtime.goroutines")\n    counters := expvar.NewMap("counters")\n    counters.Set("cnt1", new(expvar.Int))\n    counters.Set("cnt2", new(expvar.Float))\n\n    go http.ListenAndServe(":8080", nil)\n\n    for {\n        select {\n        case <- tick.C:\n            num_go.Set(int64(runtime.NumGoroutine()))\n            counters.Add("cnt1", 1)\n            counters.AddFloat("cnt2", 1.452)\n        }\n    }\n}\n```\n\nApart from the runtime memory stats, this application publishes two counters and the\nnumber of currently running Goroutines and updates these stats every second.\n\n\n\n### Configuration\n\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. Each JOB can be used to monitor a different Go application.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. |  | no |\n| url | the URL and port of the expvar endpoint. Please include the whole path of the endpoint, as the expvar handler can be installed in a non-standard location. |  | yes |\n| user | If the URL is password protected, this is the username to use. |  | no |\n| pass | If the URL is password protected, this is the password to use. |  | no |\n| collect_memstats | Enables charts for Go runtime\'s memory statistics. |  | no |\n| extra_charts | Defines extra data/charts to monitor, please see the example below. |  | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `python.d/go_expvar.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\n\njob_name:\n  job_option1: some_value\n  job_option2: some_other_value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/go_expvar.conf\n```\n\n##### Examples\n\n###### Monitor a Go app1 application\n\nThe example below sets a configuration for a Go application, called `app1`. Besides the `memstats`, the application also exposes two counters and the number of currently running Goroutines and updates these stats every second.\n\nThe `go_expvar` collector can monitor these as well with the use of the `extra_charts` configuration variable.\n\nThe `extra_charts` variable is a YAML list of Netdata chart definitions.\nEach chart definition has the following keys:\n\n```\nid:         Netdata chart ID\noptions:    a key-value mapping of chart options\nlines:      a list of line definitions\n```\n\n**Note: please do not use dots in the chart or line ID field.\nSee [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.**\n\n\n**Line definitions**\n\nEach chart can define multiple lines (dimensions).\nA line definition is a key-value mapping of line options.\nEach line can have the following options:\n\n```\n# mandatory\nexpvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint\nexpvar_type: value type; supported are "float" or "int"\nid: the id of this line/dimension in Netdata\n\n# optional - Netdata defaults are used if these options are not defined\nname: \'\'\nalgorithm: absolute\nmultiplier: 1\ndivisor: 100 if expvar_type == float, 1 if expvar_type == int\nhidden: False\n```\n\nPlease see the following link for more information about the options and their default values:\n[External plugins - dimensions](https://github.com/netdata/netdata/blob/master/src/plugins.d/README.md#dimension)\n\nApart from top-level expvars, this plugin can also parse expvars stored in a multi-level map.\nAll dicts in the resulting JSON document are then flattened to one level.\nExpvar names are joined together with \'.\' when flattening.\n\nExample:\n\n```\n{\n    "counters": {"cnt1": 1042, "cnt2": 1512.9839999999983},\n    "runtime.goroutines": 5\n}\n```\n\nIn the above case, the exported variables will be available under `runtime.goroutines`,\n`counters.cnt1` and `counters.cnt2` expvar_keys. 
If the flattening results in a key collision,\nthe first defined key wins and all subsequent keys with the same name are ignored.\n\n\n```yaml\napp1:\n name : \'app1\'\n url  : \'http://127.0.0.1:8080/debug/vars\'\n collect_memstats: true\n extra_charts:\n   - id: "runtime_goroutines"\n     options:\n       name: num_goroutines\n       title: "runtime: number of goroutines"\n       units: goroutines\n       family: runtime\n       context: expvar.runtime.goroutines\n       chart_type: line\n     lines:\n       - {expvar_key: \'runtime.goroutines\', expvar_type: int, id: runtime_goroutines}\n   - id: "foo_counters"\n     options:\n       name: counters\n       title: "some random counters"\n       units: awesomeness\n       family: counters\n       context: expvar.foo.counters\n       chart_type: line\n     lines:\n       - {expvar_key: \'counters.cnt1\', expvar_type: int, id: counters_cnt1}\n       - {expvar_key: \'counters.cnt2\', expvar_type: float, id: counters_cnt2}\n\n```\n',troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `go_expvar` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n  ```bash\n  ./python.d.plugin go_expvar debug trace\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `go_expvar` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues.  These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep go_expvar\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep go_expvar /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep go_expvar\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
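\n\nTo make the flattening rule described above concrete, a small Python sketch (an illustration, not the collector's code) that joins nested keys with '.' and lets the first defined key win on collisions:\n\n```python\n# Hypothetical example of the documented flattening behavior.\ndef flatten(doc, prefix='', out=None):\n    out = {} if out is None else out\n    for key, value in doc.items():\n        full_key = f'{prefix}.{key}' if prefix else key\n        if isinstance(value, dict):\n            flatten(value, full_key, out)\n        elif full_key not in out:  # first defined key wins on collisions\n            out[full_key] = value\n    return out\n\nsample = {'counters': {'cnt1': 1042, 'cnt2': 1512.98}, 'runtime.goroutines': 5}\nprint(flatten(sample))\n# {'counters.cnt1': 1042, 'counters.cnt2': 1512.98, 'runtime.goroutines': 5}\n```\n\n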
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Go applications (EXPVAR) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| expvar.memstats.heap | alloc, inuse | KiB |\n| expvar.memstats.stack | inuse | KiB |\n| expvar.memstats.mspan | inuse | KiB |\n| expvar.memstats.mcache | inuse | KiB |\n| expvar.memstats.live_objects | live | objects |\n| expvar.memstats.sys | sys | KiB |\n| expvar.memstats.gc_pauses | avg | ns |\n\n",integration_type:"collector",id:"python.d.plugin-go_expvar-Go_applications_(EXPVAR)",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/go_expvar/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"python.d.plugin",module_name:"pandas",monitored_instance:{name:"Pandas",link:"https://pandas.pydata.org/",categories:["data-collection.databases"],icon_filename:"pandas.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["pandas","python"]},overview:"# Pandas\n\nPlugin: python.d.plugin\nModule: pandas\n\n## Overview\n\n[Pandas](https://pandas.pydata.org/) is a de-facto standard in reading and processing most types of structured data in Python.\nIf you have metrics appearing in a CSV, JSON, XML, HTML, or [other supported format](https://pandas.pydata.org/docs/user_guide/io.html),\neither locally or via some HTTP endpoint, you can easily ingest and present those metrics in Netdata, by leveraging the Pandas collector.\n\nThis collector can be used to collect pretty much anything that can be read by Pandas, and then processed by Pandas.\n\n\nThe collector uses [pandas](https://pandas.pydata.org/) to pull data and do pandas-based preprocessing, before feeding to Netdata.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector depends on some Python (Python 3 only) packages that can usually be installed via `pip` or `pip3`.\n\n```bash\nsudo pip install pandas requests\n```\n\nNote: If you would like to use [`pandas.read_sql`](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html) to query a database, you will need to install the below packages as well.\n\n```bash\nsudo pip install 'sqlalchemy<2.0' psycopg2-binary\n```\n\n\n\n### Configuration\n\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n\n\n| 
Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| chart_configs | an array of chart configuration dictionaries | [] | yes |\n| chart_configs.name | name of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.title | title of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.family | [family](https://github.com/netdata/netdata/blob/master/docs/dashboards-and-charts/netdata-charts.md#families) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.context | [context](https://github.com/netdata/netdata/blob/master/docs/dashboards-and-charts/netdata-charts.md#contexts) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.type | the type of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.units | the units of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.df_steps | a series of pandas operations (one per line) that each returns a dataframe. | None | yes |\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. |  | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `python.d/pandas.conf`.\n\nThe file format is YAML. 
Generally, the structure is:\n\n```yaml\nupdate_every: 1\nautodetection_retry: 0\n\njob_name:\n  job_option1: some_value\n  job_option2: some_other_value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/pandas.conf\n```\n\n##### Examples\n\n###### Temperature API Example\n\nAn example pulling hourly temperature data: one chart for today's forecast (mean, min, max) and another for the current temperature.\n\n{% details open=true summary=\"Config\" %}\n```yaml\ntemperature:\n    name: \"temperature\"\n    update_every: 5\n    chart_configs:\n      - name: \"temperature_forecast_by_city\"\n        title: \"Temperature By City - Today Forecast\"\n        family: \"temperature.today\"\n        context: \"pandas.temperature\"\n        type: \"line\"\n        units: \"Celsius\"\n        df_steps: >\n          pd.DataFrame.from_dict(\n            {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m').json()['hourly']['temperature_2m']\n            for (city,lat,lng)\n            in [\n                ('dublin', 53.3441, -6.2675),\n                ('athens', 37.9792, 23.7166),\n                ('london', 51.5002, -0.1262),\n                ('berlin', 52.5235, 13.4115),\n                ('paris', 48.8567, 2.3510),\n                ('madrid', 40.4167, -3.7033),\n                ('new_york', 40.71, -74.01),\n                ('los_angeles', 34.05, -118.24),\n                ]\n            }\n            );\n          df.describe();                                               # get aggregate stats for each city;\n          df.transpose()[['mean', 'max', 'min']].reset_index();        # just take mean, min, max;\n          df.rename(columns={'index':'city'});                         # some column renaming;\n          df.pivot(columns='city').mean().to_frame().reset_index();    # force to be one row per city;\n          df.rename(columns={0:'degrees'});                            # some column renaming;\n          pd.concat([df, df['city']+'_'+df['level_0']], axis=1);       # add new column combining city and summary measurement label;\n          df.rename(columns={0:'measurement'});                        # some column renaming;\n          df[['measurement', 'degrees']].set_index('measurement');     # just take two columns we want;\n          df.sort_index();                                             # sort by city name;\n          df.transpose();                                              # transpose so it's just one wide row;\n      - name: \"temperature_current_by_city\"\n        title: \"Temperature By City - Current\"\n        family: \"temperature.current\"\n        context: \"pandas.temperature\"\n        type: \"line\"\n        units: \"Celsius\"\n        df_steps: >\n          pd.DataFrame.from_dict(\n              {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&current_weather=true').json()['current_weather']\n              for (city,lat,lng)\n              in [\n                  ('dublin', 53.3441, -6.2675),\n                  ('athens', 37.9792, 23.7166),\n                  ('london', 51.5002, -0.1262),\n  
                ('berlin', 52.5235, 13.4115),\n                  ('paris', 48.8567, 2.3510),\n                  ('madrid', 40.4167, -3.7033),\n                  ('new_york', 40.71, -74.01),\n                  ('los_angeles', 34.05, -118.24),\n                  ]\n              }\n              );\n          df.transpose();\n          df[['temperature']];\n          df.transpose();\n\n```\n{% /details %}\n###### API CSV Example\n\nAn example showing a `read_csv` from a URL and some light pandas data wrangling.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nexample_csv:\n    name: \"example_csv\"\n    update_every: 2\n    chart_configs:\n      - name: \"london_system_cpu\"\n        title: \"London System CPU - Ratios\"\n        family: \"london_system_cpu\"\n        context: \"pandas\"\n        type: \"line\"\n        units: \"n\"\n        df_steps: >\n          pd.read_csv('https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60', storage_options={'User-Agent': 'netdata'});\n          df.drop('time', axis=1);\n          df.mean().to_frame().transpose();\n          df.apply(lambda row: (row.user / row.system), axis = 1).to_frame();\n          df.rename(columns={0:'average_user_system_ratio'});\n          df*100;\n\n```\n{% /details %}\n###### API JSON Example\n\nAn example showing a `read_json` from a URL and some light pandas data wrangling.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nexample_json:\n    name: \"example_json\"\n    update_every: 2\n    chart_configs:\n      - name: \"london_system_net\"\n        title: \"London System Net - Total Bandwidth\"\n        family: \"london_system_net\"\n        context: \"pandas\"\n        type: \"area\"\n        units: \"kilobits/s\"\n        df_steps: >\n          pd.DataFrame(requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['data'], columns=requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['labels']);\n          df.drop('time', axis=1);\n          abs(df);\n          df.sum(axis=1).to_frame();\n          df.rename(columns={0:'total_bandwidth'});\n\n```\n{% /details %}\n###### XML Example\n\nAn example showing a `read_xml` from a URL and some light pandas data wrangling.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nexample_xml:\n    name: \"example_xml\"\n    update_every: 2\n    line_sep: \"|\"\n    chart_configs:\n      - name: \"temperature_forecast\"\n        title: \"Temperature Forecast\"\n        family: \"temp\"\n        context: \"pandas.temp\"\n        type: \"line\"\n        units: \"celsius\"\n        df_steps: >\n          pd.read_xml('http://metwdb-openaccess.ichec.ie/metno-wdb2ts/locationforecast?lat=54.7210798611;long=-8.7237392806', xpath='./product/time[1]/location/temperature', parser='etree')|\n          df.rename(columns={'value': 'dublin'})|\n          df[['dublin']]|\n\n```\n{% /details %}\n###### SQL Example\n\nAn example showing a `read_sql` from a Postgres database using SQLAlchemy.\n\n{% details open=true summary=\"Config\" %}\n```yaml\nsql:\n    name: \"sql\"\n    update_every: 5\n    chart_configs:\n      - name: \"sql\"\n        title: \"SQL Example\"\n        family: \"sql.example\"\n        context: \"example\"\n        type: \"line\"\n        units: \"percent\"\n        df_steps: >\n          pd.read_sql_query(\n            sql='\\\n                select \\\n                    random()*100 as metric_1, \\\n                    random()*100 as metric_2 \\\n              ',\n   
         con=create_engine('postgresql://localhost/postgres?user=netdata&password=netdata')\n            );\n\n```\n{% /details %}\n",troubleshooting:"## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `pandas` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n  ```bash\n  ./python.d.plugin pandas debug trace\n  ```\n\n### Getting Logs\n\nIf you're encountering problems with the `pandas` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep pandas\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:\n\n```bash\ngrep pandas /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep pandas\n```\n\n",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThis collector expects exactly one row in the final pandas DataFrame. That first row is taken\nas the most recent value for each dimension on each chart, using `df.to_dict(orient='records')[0]`.\nSee [pd.to_dict()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_dict.html).\n\n
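For instance, a minimal sketch of a hypothetical job (all names and values are illustrative) whose final `df_steps` expression collapses the frame to a single row:\n\n```yaml\nminimal_example:\n    name: \"minimal_example\"\n    update_every: 5\n    chart_configs:\n      - name: \"example_mean\"\n        title: \"Example Mean\"\n        family: \"example\"\n        context: \"pandas.example\"\n        type: \"line\"\n        units: \"n\"\n        df_steps: >\n          pd.DataFrame({'value': [1, 2, 3]});                 # any pandas-readable source works here;\n          df.mean().to_frame().transpose();                   # one row; taken as the latest value per dimension;\n```\n\n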
### Per Pandas instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n\n",integration_type:"collector",id:"python.d.plugin-pandas-Pandas",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/pandas/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-scripts.d.plugin-nagios",plugin_name:"scripts.d.plugin",module_name:"nagios",monitored_instance:{name:"Nagios Plugins",link:"https://www.nagios-plugins.org/",icon_filename:"nagios.png",categories:["data-collection.synthetic-testing"]},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["nagios","plugins","checks","scripts","monitoring"]},overview:"# Nagios Plugins\n\nPlugin: scripts.d.plugin\nModule: nagios\n\n## Overview\n\nThis module runs unmodified [Nagios plugins](https://www.nagios-plugins.org/) inside Netdata without any changes to the plugins themselves.\n\nFor each configured job it collects:\n\n- **Check state**: OK / WARNING / CRITICAL / UNKNOWN (with soft/hard state tracking).\n- **Performance data**: Every `label=value;warn;crit;min;max` metric emitted by the plugin is parsed and charted automatically, with unit normalization where possible.\n- **Execution telemetry**: Runtime duration, scheduling latency, CPU time, peak RSS memory, and disk I/O per job.\n- **Scheduler health**: Running / queued / scheduled job counts and throughput rates.\n\n\nJobs are executed via `nd-run` (the Netdata unprivileged helper) at the configured `check_interval`.\nStandard Nagios macros (`$HOSTADDRESS$`, `$ARG1$`, `$USERn$`, etc.) are expanded before execution.\nPlugin output is parsed according to the [Nagios Plugin API](https://nagios-plugins.org/doc/guidelines.html):\nthe first line provides the status and optional performance data after the `|` separator.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nPlugins run as the `netdata` user via `nd-run`. If a plugin requires elevated privileges, configure it through `ndsudo` or adjust filesystem permissions accordingly.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nNo auto-detection. Each job must be explicitly configured with a `plugin` path pointing to the Nagios plugin executable.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nEach job spawns a subprocess via `nd-run`. Resource usage (CPU, memory, disk I/O) is tracked per execution and exposed as telemetry charts.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Install Nagios plugins\n\nInstall the plugins you want to run. Most distributions provide packages:\n\n```bash\n# Debian/Ubuntu\napt install nagios-plugins\n\n# RHEL/CentOS/Fedora\ndnf install nagios-plugins-all\n```\n\nYou can also use any script or binary that follows the [Nagios Plugin API](https://nagios-plugins.org/doc/guidelines.html).\n\n\n\n### Configuration\n\n#### Options\n\nEach job defines a single Nagios plugin execution. Jobs are listed under the `jobs` key.\n\n
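For instance, a minimal sketch of the file layout (host and thresholds are illustrative):\n\n```yaml\njobs:\n  - name: ping_localhost\n    plugin: /usr/lib/nagios/plugins/check_ping\n    args: ["-H", "127.0.0.1", "-w", "100,10%", "-c", "200,20%"]\n```\n\n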
{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **General** | plugin | Absolute path to the Nagios plugin executable. |  | yes |\n| **Arguments** | args | Command-line arguments passed to the plugin. Nagios macros are expanded before execution. | [] | no |\n|  | arg_values | Values bound to positional `$ARGn$` macros (max 32). | [] | no |\n| **General** | vnode | Virtual node name to associate with this job. The vnode must be defined in the vnodes configuration directory. |  | no |\n|  | scheduler | Name of the scheduler that executes this job. | default | no |\n| **Timing** | timeout | Maximum execution time before the plugin is killed. Duration string (e.g. `30s`, `1m`). | 30s | no |\n|  | timeout_state | State reported when a timeout occurs. | critical | no |\n|  | check_interval | Base scheduling interval between executions. Duration string. | 1m | no |\n|  | retry_interval | Interval between soft retries after a non-OK result. Duration string. | 30s | no |\n|  | max_check_attempts | Number of soft-state attempts before transitioning to a hard state. | 3 | no |\n| **Environment** | user_macros | Key/value pairs exposed as `$USERn$` macros. For example, `USER1` becomes `$USER1$`. | {} | no |\n|  | custom_vars | Key/value pairs exported as `$_SERVICEvar$` environment variables (`NAGIOS__SERVICEvar`). | {} | no |\n|  | environment | Additional environment variables set before executing the plugin. | {} | no |\n| **General** | working_directory | Working directory for plugin execution. |  | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `scripts.d/nagios.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config scripts.d/nagios.conf\n```\n\n##### Examples\n\n###### SSL certificate check\n\nCheck SSL certificate expiry for a host.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: ssl_github\n    plugin: /usr/lib/nagios/plugins/check_http\n    args: ["-H", "github.com", "--ssl", "-C", "30,15"]\n    timeout: 30s\n    check_interval: 1h\n    retry_interval: 5m\n    max_check_attempts: 3\n\n```\n{% /details %}\n###### Check with macros and vnode\n\nRun a plugin against a virtual node using Nagios macros.\nThe `$HOSTADDRESS$` and `$ARG1$` macros are expanded from the vnode labels and `arg_values`.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: check_ssh\n    plugin: /usr/lib/nagios/plugins/check_ssh\n    args: ["-H", "$HOSTADDRESS$", "-p", "$ARG1$"]\n    arg_values: ["22"]\n    vnode: my-server\n    check_interval: 5m\n\n```\n{% /details %}\n###### Check with custom vars\n\nPass service-level custom variables to a plugin as `$_SERVICEvar$` macros.\n\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: check_api\n    plugin: /usr/local/bin/check_api\n    args: ["-u", "$_SERVICEENDPOINT$"]\n    custom_vars:\n      ENDPOINT: "/health"\n\n```\n{% /details %}\n',troubleshooting:'## Troubleshooting\n\n### Plugin exits with "permission 
denied".\n\n\n### \n\nMacros like `$HOSTADDRESS$` are not expanded.\n\n\n',alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:'## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n### Virtual Node Label Conventions\n\nWhen a job references a `vnode`, the module reads Nagios macros from the virtual node\'s **labels** using prefix conventions:\n\n| Label key | Nagios macro | Environment variable | Description |\n|-----------|-------------|---------------------|-------------|\n| `_address` | `$HOSTADDRESS$` | `NAGIOS_HOSTADDRESS` | IP address or DNS name of the host |\n| `_alias` | `$HOSTALIAS$` | `NAGIOS_HOSTALIAS` | Human-readable host alias |\n| `_VARNAME` | `$_HOSTVARNAME$` | `NAGIOS__HOSTVARNAME` | Custom host variable (any `_` prefixed key except `_address` and `_alias`) |\n| `key` | `$_HOSTLABEL_KEY$` | `NAGIOS__HOSTLABEL_KEY` | Regular label (no `_` prefix) |\n\nExample vnode configuration (`/etc/netdata/vnodes/hosts.yaml`):\n\n```yaml\n- hostname: web-server-1\n  guid: 12345678-1234-1234-1234-123456789abc\n  labels:\n    _address: "192.168.1.10"\n    _alias: "Web Server 1"\n    _DATACENTER: "us-east-1"\n    role: "frontend"\n    environment: "production"\n```\n\nThis produces:\n\n| Macro | Value |\n|-------|-------|\n| `$HOSTADDRESS$` | `192.168.1.10` |\n| `$HOSTALIAS$` | `Web Server 1` |\n| `$_HOSTDATACENTER$` | `us-east-1` |\n| `$_HOSTLABEL_ROLE$` | `frontend` |\n| `$_HOSTLABEL_ENVIRONMENT$` | `production` |\n\n\n### Per job\n\nMetrics for each configured Nagios plugin job.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| nagios_job | Job name as defined in the configuration. |\n| nagios_plugin | Basename of the plugin executable. |\n| nagios_vnode | Virtual node associated with the job (if any). |\n| nagios_scheduler | Scheduler executing the job. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nagios.jobs.state | ok, warning, critical, unknown | state |\n| nagios.jobs.runtime | running, retrying, skipped | boolean |\n| nagios.jobs.latency | duration | seconds |\n| nagios.jobs.cpu | cpu | seconds |\n| nagios.jobs.mem | rss | bytes |\n| nagios.jobs.disk | read, write | bytes |\n\n### Per perfdata\n\nMetrics extracted from the plugin\'s performance data output.\nEach `label=value;warn;crit;min;max` entry produces a separate chart.\n\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| nagios_job | Job name. |\n| nagios_plugin | Plugin executable. |\n| perf_label | Performance data label as emitted by the plugin. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nagios.{script}.{label} | value | varies |\n\n### Per scheduler\n\nScheduler-level metrics.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| nagios_scheduler | Scheduler name. 
### Per scheduler\n\nScheduler-level metrics.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| nagios_scheduler | Scheduler name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nagios.scheduler.jobs | running, queued, scheduled | jobs |\n| nagios.scheduler.rate | started, finished, skipped | jobs |\n| nagios.scheduler.next | next | seconds |\n\n',integration_type:"collector",id:"scripts.d.plugin-nagios-Nagios_Plugins",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/scripts.d/modules/nagios/metadata.yaml",functions:"",related_resources:""},{meta:{id:"collector-scripts.d.plugin-scheduler",plugin_name:"scripts.d.plugin",module_name:"scheduler",monitored_instance:{name:"scripts.d Scheduler",link:"",icon_filename:"netdata-logomark.svg",categories:["data-collection.applications"]},related_resources:{integrations:{list:[{plugin_name:"scripts.d.plugin",module_name:"nagios"},{plugin_name:"scripts.d.plugin",module_name:"zabbix"}]}},info_provided_to_referring_integrations:{description:""},keywords:["scheduler","scripts"]},overview:'# scripts.d Scheduler\n\nPlugin: scripts.d.plugin\nModule: scheduler\n\n## Overview\n\nThe scheduler module manages the execution of jobs defined by the nagios and zabbix modules.\n\nIt provides:\n\n- **Worker pool**: Concurrent execution with configurable worker count and queue depth.\n- **OTLP logging**: Optional structured log export via gRPC for job execution results.\n\nScheduler-level metrics (job status, throughput, next run time) are exposed through the nagios and zabbix modules that use it.\n\n\nThe scheduler manages per-job timers and dispatches jobs to a worker pool.\nEach worker executes a job via the configured runner (nagios or zabbix) and reports results back.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\nscripts.d Scheduler can be monitored further using the following other integrations:\n\n- {% relatedResource id="scripts.d.plugin-nagios-Nagios_Plugins" %}Nagios Plugins{% /relatedResource %}\n\n### Default Behavior\n\n#### Auto-Detection\n\nA `default` scheduler is created automatically. Additional named schedulers can be defined in the configuration.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n',setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nScheduler configuration controls the worker pool and optional OTLP logging.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Group | Option | Description | Default | Required |\n|:------|:-----|:------------|:--------|:---------:|\n| **General** | workers | Number of concurrent workers executing jobs. | 50 | no |\n|  | queue_size | Capacity of the internal work queue. | 128 | no |\n| **Logging** | logging.enabled | Enable structured OTLP log export for job results. | true | no |\n|  | logging.otlp.endpoint | gRPC endpoint for OTLP log export. | 127.0.0.1:4317 | no |\n\n\n{% /details %}\n\n\n
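As a sketch, assuming the nested YAML layout implied by the dotted option names above, a scheduler entry tuning the OTLP logging options could look like this (values shown are the defaults):\n\n```yaml\njobs:\n  - name: default\n    logging:\n      enabled: true\n      otlp:\n        endpoint: 127.0.0.1:4317\n```\n\n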
#### via File\n\nThe configuration file name for this integration is `scripts.d/scheduler.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config scripts.d/scheduler.conf\n```\n\n##### Examples\n\n###### Custom scheduler\n\nDefine a scheduler with a larger worker pool.\n\n{% details open=true summary="Config" %}\n```yaml\njobs:\n  - name: heavy\n    workers: 100\n    queue_size: 256\n\n```\n{% /details %}\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nScheduler metrics are exposed through the nagios and zabbix modules under the `nagios.scheduler.*` context.\n\n",integration_type:"collector",id:"scripts.d.plugin-scheduler-scripts.d_Scheduler",edit_link:"https://github.com/netdata/netdata/blob/master/src/go/plugin/scripts.d/modules/scheduler/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"slabinfo.plugin",module_name:"slabinfo.plugin",monitored_instance:{name:"Linux kernel SLAB allocator statistics",link:"https://kernel.org/",categories:["data-collection.operating-systems"],icon_filename:"linuxserver.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["linux kernel","slab","slub","slob","slabinfo"]},overview:"# Linux kernel SLAB allocator statistics\n\nPlugin: slabinfo.plugin\nModule: slabinfo.plugin\n\n## Overview\n\nCollects metrics on kernel SLAB cache utilization to monitor the low-level performance impact of workloads in the kernel.\n\n\nThe plugin parses `/proc/slabinfo`.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to `/proc/slabinfo`, which is accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to this file. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in its permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nDue to the large number of metrics generated by this integration, it is disabled by default and must be manually enabled inside `/etc/netdata/netdata.conf`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Minimum setup\n\nIf you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-slabinfo`.\n\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="The main configuration file." 
%}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| Enable plugin | As described above, the plugin is disabled by default; this option is used to enable it. | no | yes |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugins]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nSLAB cache utilization metrics for the whole system.\n\n### Per Linux kernel SLAB allocator statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.slabmemory | a dimension per cache | B |\n| mem.slabfilling | a dimension per cache | % |\n| mem.slabwaste | a dimension per cache | B |\n\n",integration_type:"collector",id:"slabinfo.plugin-slabinfo.plugin-Linux_kernel_SLAB_allocator_statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/slabinfo.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"tc.plugin",module_name:"tc.plugin",monitored_instance:{name:"tc QoS classes",link:"https://wiki.linuxfoundation.org/networking/iproute2",categories:["data-collection.networking"],icon_filename:"netdata.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# tc QoS classes\n\nPlugin: tc.plugin\nModule: tc.plugin\n\n## Overview\n\nExamine tc metrics to gain insights into Linux traffic control operations. Study packet flow rates, queue lengths, and drop rates to optimize network traffic flow.\n\nThe plugin uses the `tc` command to collect information about traffic control.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs to access the `tc` command to get the necessary metrics. 
To achieve this, Netdata modifies the permissions of the file `/usr/libexec/netdata/plugins.d/tc-qos-helper.sh`.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Create `tc-qos-helper.conf`\n\nIn order to view tc classes, you need to create the file `/etc/netdata/tc-qos-helper.conf` with content:\n\n```text\ntc_show="class"\n```\n\n\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config option" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| script to run to get tc values | Path to script `tc-qos-helper.sh` | /usr/libexec/netdata/plugins.d/tc-qos-helper.sh | no |\n| enable show all classes and qdiscs for all interfaces | yes/no flag to control what data is presented. | yes | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:tc]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic example configuration using classes defined in `/etc/iproute2/tc_cls`.\n\nAn example of class IDs mapped to names in that file can be:\n\n```text\n2:1 Standard\n2:8 LowPriorityData\n2:10 HighThroughputData\n2:16 OAM\n2:18 LowLatencyData\n2:24 BroadcastVideo\n2:26 MultimediaStreaming\n2:32 RealTimeInteractive\n2:34 MultimediaConferencing\n2:40 Signalling\n2:46 Telephony\n2:48 NetworkControl\n```\n\nYou can read more about setting up the tc rules in rc.local in this [GitHub issue](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973).\n\n\n```yaml\n[plugin:tc]\n  script to run to get tc values = /usr/libexec/netdata/plugins.d/tc-qos-helper.sh\n  enable show all classes and qdiscs for all interfaces = yes\n\n```\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per network device direction\n\nMetrics related to QoS network device directions. Each direction (in/out) produces its own set of the following metrics.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| device | The network interface. 
|\n| device_name | The network interface name. |\n| group | The device family. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tc.qos | a dimension per class | kilobits/s |\n| tc.qos_packets | a dimension per class | packets/s |\n| tc.qos_dropped | a dimension per class | packets/s |\n| tc.qos_tokens | a dimension per class | tokens |\n| tc.qos_ctokens | a dimension per class | ctokens |\n\n",integration_type:"collector",id:"tc.plugin-tc.plugin-tc_QoS_classes",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/tc.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"timex.plugin",module_name:"timex.plugin",monitored_instance:{name:"Timex",link:"",categories:["data-collection.networking"],icon_filename:"syslog.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# Timex\n\nPlugin: timex.plugin\nModule: timex.plugin\n\n## Overview\n\nExamine Timex metrics to gain insights into system clock operations. Study time sync status, clock drift, and adjustments to ensure accurate system timekeeping.\n\nIt uses the adjtimex system call on Linux and ntp_adjtime on FreeBSD or macOS to monitor the system kernel clock synchronization state.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nAt least one option ('clock synchronization state', 'time offset') needs to be enabled for this collector to run.\n\n{% details open=true summary=\"Config options\" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 1 | no |\n| clock synchronization state | Make chart showing system clock synchronization state. | yes | yes |\n| time offset | Make chart showing computed time offset between local system and reference clock. | yes | yes |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:timex]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\n\n###### Basic\n\nA basic configuration example.\n\n{% details open=true summary=\"Config\" %}\n```yaml\n[plugin:timex]\n  update every = 1\n  clock synchronization state = yes\n  time offset = yes\n\n```\n{% /details %}\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ system_clock_sync_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/timex.conf) | system.clock_sync_state | when set to 0, the system kernel believes the system clock is not properly synchronized to a reliable server |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Timex instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.clock_sync_state | state | state |\n| system.clock_status | unsync, clockerr | status |\n| system.clock_sync_offset | offset | milliseconds |\n\n",integration_type:"collector",id:"timex.plugin-timex.plugin-Timex",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/timex.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"GetHardwareInfo",monitored_instance:{name:"Hardware information collected from kernel ring.",link:"https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/",categories:["data-collection.hardware-and-sensors"],icon_filename:"windows.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["cpu","temperature"]},overview:"# Hardware information collected from kernel ring.\n\nPlugin: windows.plugin\nModule: GetHardwareInfo\n\n## Overview\n\nThis collector monitors CPU temperature on Windows systems.\n\n\nIt reads the `msr` register using the netdata_driver.sys driver.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis optional feature requires the Netdata Driver (netdata_driver.sys) to be installed on the host.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 1 | no |\n\n\n\n\n
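For reference, a minimal sketch of the corresponding `netdata.conf` entry (using the default value shown above):\n\n```ini\n[plugin:windows:GetHardwareInfo]\n    update every = 1\n```\n\n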
#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:GetHardwareInfo]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Hardware information collected from kernel ring. instance\n\nThis metric shows the latest CPU temperature.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.temperature | a dimension per core | Celsius |\n\n",integration_type:"collector",id:"windows.plugin-GetHardwareInfo-Hardware_information_collected_from_kernel_ring.",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"GetPowerSupply",monitored_instance:{name:"Power supply",link:"https://learn.microsoft.com/en-us/windows/win32/power/power-management-portal",categories:["data-collection.hardware-and-sensors"],icon_filename:"powersupply.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["power supply"]},overview:"# Power supply\n\nPlugin: windows.plugin\nModule: GetPowerSupply\n\n## Overview\n\nThis collector monitors power supply statistics on Windows systems.\n\n\nIt uses the Windows internal API to retrieve available data.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 1 | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:GetPowerSupply]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ power_supply_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/power_supply_capacity.conf) | powersupply.capacity | percentage of remaining power supply capacity |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Power Supply\n\nThese metrics refer to the power supply device.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| device | Device name delivered as a Windows path. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powersupply.capacity | capacity | percentage |\n| powersupply.voltage | now | V |\n\n",integration_type:"collector",id:"windows.plugin-GetPowerSupply-Power_supply",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"GetSensors",monitored_instance:{name:"Sensors",link:"https://learn.microsoft.com/en-us/windows/win32/sensorsapi/portal",categories:["data-collection.hardware-and-sensors"],icon_filename:"windows.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["Sensors","Windows"]},overview:"# Sensors\n\nPlugin: windows.plugin\nModule: GetSensors\n\n## Overview\n\nThis collector monitors sensors on Windows systems.\n\n\nIt uses the Sensors API to retrieve available data.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 1 | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:GetSensors]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\n\n###### Specific Sensors\n\nYou can add custom sensor information by creating a dedicated sensor section.\n\n```yaml\n[plugin:windows:GetSensors:Your Sensor Name]\n  units = Speed\n  multiplier = 100\n  title = Current speed.\n\n```\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Sensor\n\nThese metrics refer to Sensors.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| name | Sensor friendly name. |\n| manufacturer | The sensor manufacturer. |\n| model | The sensor model. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.hw.sensor.temperature.input | input | Cel |\n| system.hw.sensor.power.input | input | W |\n| system.hw.sensor.current.input | input | A |\n| system.hw.sensor.humidity.input | input | % |\n| system.hw.sensor.lux.input | input | lx |\n| system.hw.sensor.color.input | input | Cel |\n| system.hw.sensor.voltage.input | input | V |\n| system.hw.sensor.resistance.input | input | Ohms |\n| system.hw.sensor.capacitance.input | input | F |\n| system.hw.sensor.inductance.input | input | H |\n| system.hw.sensor.pressure.input | input | Pa |\n| system.hw.sensor.latitude.input | input | Degrees |\n| system.hw.sensor.longitude.input | input | Degrees |\n| system.hw.sensor.force.input | input | N |\n| system.hw.sensor.gauge_pressure.input | input | Pa |\n| system.hw.sensor.human_presence.input | input | presence |\n| system.hw.sensor.human_proximity.input | input | m |\n| system.hw.sensor.distance.input | inputX, inputY, inputZ | m |\n| system.hw.sensor.acceleration.input | inputX, inputY, inputZ | g |\n| system.hw.sensor.state.input | ready, not_available, no_data, initializing, access_denied, error | status |\n| system.hw.sensor.custom | inputN | nd |\n\n",integration_type:"collector",id:"windows.plugin-GetSensors-Sensors",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"PerflibAD",monitored_instance:{name:"Active Directory",link:"https://learn.microsoft.com/en-us/windows-server/identity/ad-ds/get-started/virtual-dc/active-directory-domain-services-overview",categories:["data-collection.applications"],icon_filename:"windows.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["windows","microsoft","active directory","ad"]},overview:"# Active Directory\n\nPlugin: windows.plugin\nModule: PerflibAD\n\n## Overview\n\nThis collector monitors Active Directory I/O and queries.\n\n\nIt queries the DirectoryServices object from Perflib in order to gather the 
metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 10 | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:PerflibAD]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Active Directory\n\nThese metrics refer to Active Directory.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.dra_replication_properties_updated | inbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound | properties/s |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | request | requests/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.ds_threads | thread | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.ldap_searches | searches | searches/s |\n| ad.atq_average_request_latency | time | seconds |\n\n",integration_type:"collector",id:"windows.plugin-PerflibAD-Active_Directory",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"PerflibADCS",monitored_instance:{name:"Active Directory Certificate Service",link:"https://learn.microsoft.com/en-us/windows-server/identity/ad-cs/",categories:["data-collection.applications"],icon_filename:"windows.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["windows","microsoft","active directory","adcs","ad"]},overview:"# Active Directory Certificate Service\n\nPlugin: windows.plugin\nModule: PerflibADCS\n\n## Overview\n\nThis collector monitors Active Directory Certificate Services statistics.\n\n\nIt queries the 'Certification Authority' object per certificate from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 10 | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:PerflibADCS]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Certificate Service Certificate\n\nThese metrics refer to the Certificate instances defined on the host.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cert | The certificate name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_requests | requests | requests/s |\n| adcs.cert_request_processing_time | processing_times | seconds |\n| adcs.cert_retrievals | retrievals | retrievals/s |\n| adcs.cert_failed_requests | failed | requests/s |\n| adcs.cert_issued_requests | issued | requests/s |\n| adcs.cert_pending_requests | pending | requests/s |\n| adcs.cert_challenge_responses | challenge | responses/s |\n| adcs.cert_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_request_cryptographic_signing_time | signing_time | seconds |\n| adcs.cert_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_signed_certificate_timestamp_lists | lists | lists/s |\n| adcs.cert_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n",integration_type:"collector",id:"windows.plugin-PerflibADCS-Active_Directory_Certificate_Service",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"PerflibADFS",monitored_instance:{name:"Active Directory Federation Service",link:"https://learn.microsoft.com/en-us/windows-server/identity/ad-fs/ad-fs-overview",categories:["data-collection.applications"],icon_filename:"windows.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["windows","microsoft","active directory","adfs","ad"]},overview:"# Active Directory Federation Service\n\nPlugin: windows.plugin\nModule: PerflibADFS\n\n## Overview\n\nThis collector monitors Active Directory Federation Services statistics.\n\n\nIt queries the 'AD FS' object from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the 
metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 10 | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:PerflibADFS]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Federation Service\n\nThese metrics refer to Federated Identity and Access Management on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | requests/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests 
| passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n\n",integration_type:"collector",id:"windows.plugin-PerflibADFS-Active_Directory_Federation_Service",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"PerflibASP",monitored_instance:{name:"ASP.NET",link:"https://dotnet.microsoft.com/en-us/apps/aspnet",categories:["data-collection.web-servers-and-proxies"],icon_filename:"windows.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["windows","ASP","webservice"]},overview:"# ASP.NET\n\nPlugin: windows.plugin\nModule: PerflibASP\n\n## Overview\n\nThis collector monitors ASP.NET applications.\n\n\nIt queries for the 'ASP.NET' and 'ASP.NET Applications' objects from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| PerflibASP | An option to enable or disable the data collection. | yes | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ASP.NET Global Data\n\nThese metrics refer to ASP.NET global metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| aspnet.application_restarts | restarts | restarts |\n| aspnet.worker_process_restarts | restarts | restarts |\n\n### Per ASP.NET Applications\n\nASP.NET application performance counters.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| aspnet_app | The application name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| aspnet.anonymous_request | requests | requests |\n| aspnet.compilation_totals | compilations | compilations |\n| aspnet.errors_during_preprocessing | preprocessing | errors |\n| aspnet.errors_during_compilation | compilation | errors |\n| aspnet.errors_during_execution | execution | errors |\n| aspnet.errors_during_unhandled_execution | unhandled | errors |\n| aspnet.requests_byte_total | in, out | bytes |\n| aspnet.requests_executing | executing | requests |\n| aspnet.requests_failed | failed | requests |\n| aspnet.requests_not_found | not found | requests |\n| aspnet.requests_not_authorized | not_authorized | requests |\n| aspnet.requests_in_application_queue | queue | requests |\n| aspnet.requests_timeout | timeout | requests |\n| aspnet.requests_succeeded | success | requests |\n| aspnet.sessions_active | active | sessions |\n| aspnet.sessions_abandoned | abandoned | sessions |\n| aspnet.sessions_timed_out | timed out | sessions |\n| aspnet.transactions_aborted | aborted | transactions |\n| aspnet.transactions_committed | committed | transactions |\n| aspnet.transactions_pending | pending | transactions |\n| aspnet.events_raised_per_sec | raised | events |\n| aspnet.error_events_raised_per_sec | events | errors |\n| aspnet.events_audit_success | events | errors |\n| aspnet.events_audit_failure | audit | failures |\n| aspnet.membership_auth_success | success | auth |\n| aspnet.form_authentication_success | success | auth |\n| aspnet.form_authentication_failure | failure | auth |\n\n",integration_type:"collector",id:"windows.plugin-PerflibASP-ASP.NET",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"PerflibExchange",monitored_instance:{name:"MS Exchange",link:"https://www.microsoft.com/en-us/microsoft-365/exchange/email",categories:["data-collection.applications"],icon_filename:"exchange.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["windows","microsoft","exchange","mail"]},overview:"# MS Exchange\n\nPlugin: windows.plugin\nModule: PerflibExchange\n\n## Overview\n\nThis collector monitors Microsoft Exchange.\n\n\nIt queries different Exchange objects from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance 
Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 10 | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:PerflibExchange]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Windows Exchange\n\nThese metrics refer to Windows Exchange.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.transport_queues_active_mail_box_delivery | active | messages |\n| exchange.transport_queues_external_active_remote_delivery | active | messages |\n| exchange.transport_queues_internal_active_remote_delivery | active | messages |\n| exchange.transport_queues_unreachable | unreachable | messages |\n| exchange.transport_queues_poison | poison | messages |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_active_user | users | users |\n| exchange.rpc_connection | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_user_count | users | users |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | requests | requests |\n| 
exchange.http_proxy_requests | requests | requests/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n\n",integration_type:"collector",id:"windows.plugin-PerflibExchange-MS_Exchange",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"PerflibHyperV",monitored_instance:{name:"Hyper-V",link:"https://learn.microsoft.com/en-us/virtualization/hyper-v-on-windows/about/",categories:["data-collection.containers-and-vms"],icon_filename:"windows.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["windows","microsoft","hyperv","virtualization","vm"]},overview:"# Hyper-V\n\nPlugin: windows.plugin\nModule: PerflibHyperV\n\n## Overview\n\nThis collector monitors Hyper-V virtual machines, virtual switches, and host partition resources.\n\n\nIt queries the 'HyperV' object from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 5 | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:PerflibHyperV]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Hyper-V\n\nThese metrics refer to the Hyper-V instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_io_tlb_flush | gpa | flushes/s |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_deposited_pages | gpa | pages |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n\n### Per Virtual Machine\n\nThese metrics refer to the Virtual Machine.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| vm_name | The name of the Virtual Machine. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | usage | percentage |\n| hyperv.vm_cpu_usage_by_run_context | guest, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned | bytes |\n| hyperv.vm_memory_physical_guest_visible | visible, available | bytes |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per Virtual Machine Storage Device\n\nThese metrics refer to the Virtual Machine Storage Device.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| vm_storage_device | The name of the Storage Device. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_storage_device_bytes | read, write | bytes/s |\n| hyperv.vm_storage_device_operations | read, write | operations/s |\n| hyperv.vm_storage_device_errors | errors | errors/s |\n\n### Per Virtual Machine Network Interface\n\nThese metrics refer to the Virtual Machine Network Interface.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| vm_net_interface | The name of the Network Interface. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_net_interface_traffic | received, sent | kilobits/s |\n| hyperv.vm_net_interface_ipsec_traffic | received, sent | kilobits/s |\n| hyperv.vm_net_interface_packets | received, sent | packets/s |\n| hyperv.vm_net_interface_directed_packets | received, sent | packets/s |\n| hyperv.vm_net_interface_broadcast_packets | received, sent | packets/s |\n| hyperv.vm_net_interface_multicast_packets | received, sent | packets/s |\n| hyperv.vm_net_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per Virtual Switch\n\nThese metrics refer to the Virtual Switch.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| vswitch | The name of the Virtual Switch. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_traffic | received, sent | kilobits/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n",integration_type:"collector",id:"windows.plugin-PerflibHyperV-Hyper-V",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"PerflibMemory",monitored_instance:{name:"Memory statistics",link:"https://learn.microsoft.com/en-us/windows/win32/Memory/memory-management",categories:["data-collection.operating-systems"],icon_filename:"windows.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["memory","swap"]},overview:"# Memory statistics\n\nPlugin: windows.plugin\nModule: PerflibMemory\n\n## Overview\n\nThis collector monitors swap and memory pool statistics on Windows systems.\n\n\nIt queries for the 'Memory' object from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 1 | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:PerflibMemory]` section within that file.\n\nThe file format is a modified INI syntax. 
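As a hedged illustration, relaxing this collector's sampling from the default of 1 second is a single documented option in the section named above (the value of 5 is illustrative):\n\n```ini\n[plugin:windows:PerflibMemory]\n    # hypothetical override: sample swap and pool statistics every 5 seconds\n    update every = 5\n```\n\n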
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per System\n\nThese metrics refer to the entire system.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swap_iops | read, write | operations/s |\n| mem.swap_pages_io | read, write | pages/s |\n| mem.system_pool_size | paged, non-paged | bytes |\n| mem.system_page_table_entries | free | pages |\n\n",integration_type:"collector",id:"windows.plugin-PerflibMemory-Memory_statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"PerflibNUMA",monitored_instance:{name:"NUMA Architecture",link:"https://learn.microsoft.com/en-us/windows/win32/procthread/numa-support",categories:["data-collection.operating-systems"],icon_filename:"windows.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["windows","NUMA","processor"]},overview:"# NUMA Architecture\n\nPlugin: windows.plugin\nModule: PerflibNUMA\n\n## Overview\n\nThis collector monitors NUMA Architecture on Windows.\n\n\nIt queries NUMA Node Memory from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 1 | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:PerflibNUMA]` section within that file.\n\nThe file format is a modified INI syntax. 
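For instance, a sketch of an override using only the documented `update every` option (the value of 10 is illustrative):\n\n```ini\n[plugin:windows:PerflibNUMA]\n    # hypothetical override: refresh NUMA node memory metrics every 10 seconds\n    update every = 10\n```\n\n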
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per NUMA\n\nThese metrics refer to memory utilization on NUMA systems.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| node | The identifier of the CPU node. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.numa_node_mem_usage | free, standby | bytes |\n\n",integration_type:"collector",id:"windows.plugin-PerflibNUMA-NUMA_Architecture",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"PerflibNetFramework",monitored_instance:{name:"NET Framework",link:"https://dotnet.microsoft.com/en-us/learn/dotnet/what-is-dotnet-framework",categories:["data-collection.applications"],icon_filename:"dotnet.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["windows","microsoft","netframework","dotnet"]},overview:"# NET Framework\n\nPlugin: windows.plugin\nModule: PerflibNetFramework\n\n## Overview\n\nThis collector monitors applications built with .NET.\n\n\nIt queries the various .NET Framework objects for each process from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 1 | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:PerflibNetFramework]` section within that file.\n\nThe file format is a modified INI syntax. 
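As an illustrative sketch, an override for this collector would set the documented `update every` option inside the section named above (the value of 5 is illustrative):\n\n```ini\n[plugin:windows:PerflibNetFramework]\n    # hypothetical override: poll the .NET counters every 5 seconds instead of every 1\n    update every = 5\n```\n\n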
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Processes running\n\nThese metrics refer to NET applications.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| process | The process name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_remote_calls | calls | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s 
|\n\n",integration_type:"collector",id:"windows.plugin-PerflibNetFramework-NET_Framework",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"PerflibNetwork",monitored_instance:{name:"Network Subsystem",link:"https://learn.microsoft.com/en-us/windows/win32/networking",categories:["data-collection.networking"],icon_filename:"windows.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["network","bandwidth","ip","udp","tcp","interface"]},overview:"# Network Subsystem\n\nPlugin: windows.plugin\nModule: PerflibNetwork\n\n## Overview\n\nMonitor network interface metrics about bandwidth, state, errors and more.\n\n\nIt queries 'Network Interface' and 'Network Adapter' objects from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 1 | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:PerflibNetwork]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | Ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | Ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ 1m_received_traffic_overflow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | Average inbound utilization for the network interface ${label:device} over the last minute |\n| [ 1m_sent_traffic_overflow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | Average outbound utilization for the network interface ${label:device} over the last minute |\n| [ network_interface_output_queue_length ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.queue_length | Output Queue Length on interface ${label:device} should be zero, otherwise there are delays and bottlenecks. |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System\n\nThese metrics refer to the entire System.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.net | received, sent | kilobits/s |\n| ip.tcppackets | received, sent | packets/s |\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.tcppackets | received, sent | packets/s |\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, OutNoRoutes, InAddrErrors, InHdrErrors, InUnknownProtos | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InDestUnreachs, OutDestUnreachs, InRedirects, OutRedirects, InEchos, OutEchos, InRouterAdvert, OutRouterAdvert, InRouterSelect, OutRouterSelect, InTimeExcds, OutTimeExcds, InParmProbs, OutParmProbs, InTimestamps, OutTimestamps, InTimestampReps, OutTimestampReps | packets/s |\n| ipv6.packets | received, sent, forwarded, delivered | packets/s |\n| ipv6.tcppackets | received, sent | packets/s |\n| ipv6.udppackets | received, sent | packets/s |\n| ipv6.icmp | received, sent | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, OutNoRoutes, InAddrErrors, InHdrErrors, InUnknownProtos | packets/s |\n| ipv6.icmpmsg | InEchoReps, OutEchoReps, InDestUnreachs, OutDestUnreachs, InRedirects, OutRedirects, InEchos, OutEchos, InRouterAdvert, OutRouterAdvert, InRouterSelect, OutRouterSelect, InTimeExcds, OutTimeExcds, InParmProbs, OutParmProbs, InTimestamps, OutTimestamps, InTimestampReps, OutTimestampReps | packets/s |\n\n### Per network device\n\nThese metrics refer to Network Interfaces.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| interface_type | Classification of the network interface (real or virtual). |\n| device | System-assigned network interface identifier. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.packets | received, sent | packets/s |\n| net.speed | speed | kilobits/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound, outbound | drops/s |\n| net.queue_length | length | packets |\n| net.rsc_connections | connections | connections |\n| net.rsc_packets | packets | packets/s |\n| net.rsc_exceptions | exceptions | exceptions/s |\n| net.rsc_average_packet_size | average | bytes |\n| net.chimney_connections | connections | connections |\n\n",integration_type:"collector",id:"windows.plugin-PerflibNetwork-Network_Subsystem",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"PerflibObjects",monitored_instance:{name:"Semaphore statistics",link:"https://learn.microsoft.com/en-us/windows/win32/sync/semaphore-objects",categories:["data-collection.operating-systems"],icon_filename:"windows.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["ipc","semaphores"]},overview:"# Semaphore statistics\n\nPlugin: windows.plugin\nModule: PerflibObjects\n\n## Overview\n\nInter-Process Communication (IPC) enables different processes to communicate and coordinate with each other. 
This collector monitors IPC semaphores, which are synchronization tools that:\n\n- Control access to shared resources (like files, memory, or devices).\n- Ensure only one process can access a resource at a time.\n- Prevent conflicts between competing processes.\n\n\nIt queries for the 'Objects' object from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 1 | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:PerflibObjects]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System\n\nThese metrics refer to the entire system.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_semaphores | semaphores | semaphores |\n| system.ipc_mutexes | mutexes | mutexes |\n\n",integration_type:"collector",id:"windows.plugin-PerflibObjects-Semaphore_statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"PerflibProcesses",monitored_instance:{name:"System statistics",link:"https://learn.microsoft.com/en-us/windows/win32/procthread/processes-and-threads",categories:["data-collection.operating-systems"],icon_filename:"windows.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["process counts","threads","context switch"]},overview:"# System statistics\n\nPlugin: windows.plugin\nModule: PerflibProcesses\n\n## Overview\n\nThis collector monitors the current number of processes, threads, and context switches on Windows systems.\n\n\nIt queries the 'System' object from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 1 | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:PerflibProcesses]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System\n\nThese metrics refer to the entire system.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.processes | running | processes |\n| system.threads | threads | threads |\n| system.ctxt | switches | context switches/s |\n| system.processor_queue_length | threads | threads |\n\n",integration_type:"collector",id:"windows.plugin-PerflibProcesses-System_statistics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"PerflibProcessor",monitored_instance:{name:"Processor",link:"https://learn.microsoft.com/en-us/windows-hardware/design/minimum/windows-processor-requirements",categories:["data-collection.operating-systems"],icon_filename:"windows.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["windows","microsoft","processor","CPU"]},overview:"# Processor\n\nPlugin: windows.plugin\nModule: PerflibProcessor\n\n## Overview\n\nThis collector monitors processor statistics on the host.\n\n\nIt queries the 'Processor' object from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 1 | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:PerflibProcessor]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\n\nThe following alerts are available:\n\n| Alert name  | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | Average CPU utilization over the last 10 minutes |\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System\n\nThese metrics refer to the entire system.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | interrupts, user, privileged, dpc | percentage |\n\n### Per CPU\n\nThese metrics provide information about individual CPU cores.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| cpu | The identifier of the CPU core. On Windows systems, CPU cores are labeled as 'cpu0', 'cpu1', etc. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.cpu | interrupts, user, privileged, dpc | percentage |\n\n",integration_type:"collector",id:"windows.plugin-PerflibProcessor-Processor",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"PerflibServices",monitored_instance:{name:"Windows Services",link:"https://learn.microsoft.com/en-us/dotnet/framework/windows-services/",categories:["data-collection.operating-systems"],icon_filename:"windows.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["windows","microsoft","services"]},overview:"# Windows Services\n\nPlugin: windows.plugin\nModule: PerflibServices\n\n## Overview\n\nThis collector monitors Windows Services Status and States.\n\n\nIt queries Service Management to get the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 30 | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:PerflibServices]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Windows Service\n\nThese metrics refer to Windows Services.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| service | The service name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n\n",integration_type:"collector",id:"windows.plugin-PerflibServices-Windows_Services",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"PerflibStorage",monitored_instance:{name:"Physical and Logical Disk Performance Metrics",link:"https://learn.microsoft.com/en-us/windows/win32/fileio/disk-management",categories:["data-collection.storage"],icon_filename:"windows.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["disk","disks","io","volume","physical","logical"]},overview:"# Physical and Logical Disk Performance Metrics\n\nPlugin: windows.plugin\nModule: PerflibStorage\n\n## Overview\n\nDetailed statistics for all disk devices and volumes.\n\n\nIt queries 'LogicalDisk' and 'PhysicalDisk' objects from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 1 | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:PerflibStorage]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System\n\nThese metrics refer to the entire System.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.io | reads, writes | KiB/s |\n\n### Per Logical Disk\n\nThese metrics refer to Logical Disks.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| mount_point | Drive letter or mount point path assigned by Windows (e.g., 'C:', 'D:'). |\n| driver_type | Classification of the disk device (e.g., norootdir, removable, cdrom, ramdisk). |\n| filesystem | File system format used on the volume (e.g., NTFS, FAT32). |\n| rw_mode | Current read/write permissions status of the volume (read-only access, read and write access). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used | GiB |\n\n### Per Physical Disk\n\nThese metrics refer to Physical Disks.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| mount_point | Drive letter or mount point path assigned by Windows (e.g., 'C:', 'D:'). |\n| device | Manufacturer model name. |\n| model | The device model. |\n| device_id | Unique hardware identifier for the storage device within the system. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | reads, writes | KiB/s |\n| disk.avgsz | reads, writes | KiB/operation |\n| disk.ops | reads, writes | operations/s |\n| disk.split | discards | operations/s |\n| disk.await | reads, writes | milliseconds/operation |\n| disk.svctm | svctm | milliseconds/operation |\n| disk.util | utilization | percent |\n| disk.busy | busy | milliseconds |\n| disk.iotime | reads, writes | milliseconds/s |\n| disk.qops | operations | operations |\n\n",integration_type:"collector",id:"windows.plugin-PerflibStorage-Physical_and_Logical_Disk_Performance_Metrics",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"PerflibThermalZone",monitored_instance:{name:"System thermal zone",link:"https://learn.microsoft.com/en-us/windows-hardware/design/device-experiences/design-guide",categories:["data-collection.hardware-and-sensors"],icon_filename:"windows.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["thermal","temperature"]},overview:"# System thermal zone\n\nPlugin: windows.plugin\nModule: PerflibThermalZone\n\n## Overview\n\nThis collector monitors thermal zone statistics on Windows systems.\n\n\nIt queries for the 'Thermal Zone Information' object from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required 
|\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 5 | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:PerflibThermalZone]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Thermal zone\n\nThese metrics refer to a Thermal zone.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.thermalzone_temperature | temperature | celsius |\n\n
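No alerts are predefined for this integration, so, purely as an illustration (hypothetical file name and thresholds, not shipped configuration), a custom health alert on the chart above could be sketched like this:\n\n```bash\n# Sketch only: add a custom alert on the thermal zone chart (example thresholds).\n# On static installs the config directory may be /opt/netdata/etc/netdata instead.\nsudo tee /etc/netdata/health.d/thermalzone.conf <<'EOF'\n alarm: thermalzone_temperature_high\n    on: system.thermalzone_temperature\nlookup: average -1m of temperature\n every: 1m\n  warn: $this > 80\n  crit: $this > 90\n  info: average thermal zone temperature over the last minute\nEOF\n```\n\n",integration_type:"collector",id:"windows.plugin-PerflibThermalZone-System_thermal_zone",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"windows.plugin",module_name:"PerflibWebService",monitored_instance:{name:"IIS",link:"https://www.iis.net/",categories:["data-collection.web-servers-and-proxies"],icon_filename:"windows.svg"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:["IIS","HTTP","Web service"]},overview:"# IIS\n\nPlugin: windows.plugin\nModule: PerflibWebService\n\n## Overview\n\nThis collector monitors website requests and logins.\n\n\nIt queries the 'Web Service' object from Perflib in order to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- windows\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector automatically detects all of the metrics, no further configuration is required.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:"## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\n\n\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 1 | no |\n\n\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:windows:PerflibWebService]` section within that file.\n\nThe file format is a modified INI syntax. 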
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n",troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IIS website\n\nThese metrics refer to the IIS website.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| website | The name of the IIS website. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_requests_by_type_rate | options, get, post, head, put, delete, trace, move, copy, mkcol, propfind, proppatch, search, lock, unlock, other | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | uptime | seconds |\n\n### Per IIS Application Pool\n\nThese metrics refer to the IIS application pool WAS (Windows Process Activation Service).\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| app | The name of the Application Pool. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.application_pool_current_status | uninitialized, initialized, running, disabling, disabled, shutdown_pending, delete_pending | status |\n| iis.application_pool_current_worker_processes | running | processes |\n| iis.application_pool_worker_processes_created | created | processes/s |\n| iis.application_pool_maximum_worker_processes | created | processes |\n| iis.application_pool_recent_worker_process_failures | failures | failures/s |\n| iis.application_pool_worker_process_failures | crash, ping, startup, shutdown | failures/s |\n| iis.application_pool_recycles | recycles | recycles/s |\n| iis.application_pool_uptime | uptime | seconds |\n\n### Per IIS W3SVC W3WP\n\nThese metrics refer to the World Wide Web Publishing Service, the service responsible for hosting and serving web content.\n\nLabels:\n\n| Label      | Description     |\n|:-----------|:----------------|\n| app | Application name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.w3svc_w3wp_active_threads | threads | threads |\n| iis.w3svc_w3wp_requests_total | requests | requests/s |\n| iis.w3svc_w3wp_requests_active | requests | requests |\n| iis.w3svc_w3wp_file_cache_mem_usage | used | bytes |\n| iis.w3svc_w3wp_files_cache_total | cached_files | files/s |\n| iis.w3svc_w3wp_files_flushed_total | file_handles | flushes/s |\n| iis.w3svc_w3wp_uri_cache_flushed | cached_uris | flushes/s |\n| iis.w3svc_w3wp_total_uri_cached | uri_cache_blocks | blocks/s |\n| iis.w3svc_w3wp_total_metadata_cached | metadata_blocks | blocks/s |\n| iis.w3svc_w3wp_total_metadata_flushed | metadata_blocks | flushes/s |\n| iis.w3svc_w3wp_output_cache_active_flushed_items | used | items |\n| iis.w3svc_w3wp_output_cache_memory_usage | used | bytes |\n| iis.w3svc_w3wp_output_cache_flushed_total | output_cache_entries | flushes |\n\n",integration_type:"collector",id:"windows.plugin-PerflibWebService-IIS",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/metadata.yaml",functions:"",related_resources:""},{meta:{plugin_name:"xenstat.plugin",module_name:"xenstat.plugin",monitored_instance:{name:"Xen XCP-ng",link:"https://xenproject.org/",categories:["data-collection.containers-and-vms"],icon_filename:"xen.png"},related_resources:{integrations:{list:[]}},info_provided_to_referring_integrations:{description:""},keywords:[]},overview:"# Xen XCP-ng\n\nPlugin: xenstat.plugin\nModule: xenstat.plugin\n\n## Overview\n\nThis collector monitors host and domain statistics on XenServer and XCP-ng.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis plugin requires the `xen-dom0-libs-devel` and `yajl-devel` libraries to be installed.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### Libraries\n\n1. Install `xen-dom0-libs-devel` and `yajl-devel` using the package manager of your system (see the example below).\n\n  Note: On CentOS systems you will need the `centos-release-xen` repository, and the required Xen package is `xen-devel`.\n\n2. Re-install Netdata from source. The installer will detect that the required libraries are now available and will also build xenstat.plugin.\n\n
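For example, on an RPM-based system (a sketch; use the package manager of your distribution and the package names listed above):\n\n```bash\n# Install the xenstat.plugin build dependencies (RPM-based example)\nsudo dnf install xen-dom0-libs-devel yajl-devel\n```\n\n\n### Configuration\n\n#### Options\n\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| update every | Data collection frequency. | 1 | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:xenstat]` section within that file.\n\nThe file format is a modified INI syntax. 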
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n\n##### Examples\nThere are no configuration examples.\n\n',troubleshooting:"",alerts:"## Alerts\n\nThere are no alerts configured by default for this integration.\n",metrics:"## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Xen XCP-ng instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xenstat.mem | free, used | MiB |\n| xenstat.domains | domains | domains |\n| xenstat.cpus | cpus | cpus |\n| xenstat.cpu_freq | frequency | MHz |\n\n### Per xendomain\n\nMetrics related to Xen domains. Each domain provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.states | running, blocked, paused, shutdown, crashed, dying | boolean |\n| xendomain.cpu | used | percentage |\n| xendomain.mem | maximum, current | MiB |\n| xendomain.vcpu | a dimension per vcpu | percentage |\n\n### Per xendomain vbd\n\nMetrics related to Xen domain Virtual Block Device. Each VBD provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.oo_req_vbd | requests | requests/s |\n| xendomain.requests_vbd | read, write | requests/s |\n| xendomain.sectors_vbd | read, write | sectors/s |\n\n### Per xendomain network\n\nMetrics related to Xen domain network interfaces. 
Each network interface provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.bytes_network | received, sent | kilobits/s |\n| xendomain.packets_network | received, sent | packets/s |\n| xendomain.errors_network | received, sent | errors/s |\n| xendomain.drops_network | received, sent | drops/s |\n\n",integration_type:"collector",id:"xenstat.plugin-xenstat.plugin-Xen_XCP-ng",edit_link:"https://github.com/netdata/netdata/blob/master/src/collectors/xenstat.plugin/metadata.yaml",functions:"",related_resources:""},{id:"deploy-alpinelinux",meta:{name:"Alpine Linux",link:"https://www.alpinelinux.org/",categories:["deploy.operating-systems"],icon_filename:"alpine.svg"},keywords:["linux"],install_description:"Run the following command on your node to install and connect Netdata to your Space:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-amazonlinux",meta:{name:"Amazon Linux",link:"https://aws.amazon.com/amazon-linux-2/",categories:["deploy.operating-systems"],icon_filename:"amazonlinux.png"},keywords:["linux"],install_description:"Run the following command on your node to install and connect Netdata to your Space:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl 
https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 2 | Core | x86_64, aarch64 |  |\n| 2023 | Core | x86_64, aarch64 |  |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-archlinux",meta:{name:"Arch Linux",link:"https://archlinux.org/",categories:["deploy.operating-systems"],icon_filename:"archlinux.png"},keywords:["linux"],install_description:"Run the following command on your node to install and connect Netdata to your Space:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate |  |  |\n\nOn other releases of this distribution, a static binary will be installed in 
`/opt/netdata`.\n",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-centos",meta:{name:"CentOS",link:"https://www.centos.org/",categories:["deploy.operating-systems"],icon_filename:"centos.svg"},keywords:["linux"],install_description:"Run the following command on your node to install and connnect Netdata to your Space:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 7 | Core | x86_64 |  |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-centos-stream",meta:{name:"CentOS Stream",link:"https://www.centos.org/centos-stream",categories:["deploy.operating-systems"],icon_filename:"centos.svg"},keywords:["linux"],install_description:"Run the following command on your node to install and connnect Netdata to your Space:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} 
--claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 10 | Community | x86_64, aarch64 |  |\n| 9 | Community | x86_64, aarch64 |  |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-debian",meta:{name:"Debian",link:"https://www.debian.org/",categories:["deploy.operating-systems"],icon_filename:"debian.svg"},keywords:["linux"],install_description:"Run the following command on your node to install and connect Netdata to your Space:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13 | Core | amd64, armhf, arm64 |  |\n| 12 | Core | i386, amd64, armhf, arm64 |  |\n| 11 | Core | i386, amd64, armhf, arm64 |  |\n\nOn other releases of this distribution, a static binary will be installed in 
`/opt/netdata`.\n",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-docker",meta:{name:"Docker",link:"https://www.docker.com/",categories:["deploy.docker-kubernetes"],icon_filename:"docker.svg"},keywords:["docker","container","containers"],install_description:"Install and connect new Docker containers\nFind the commands for `docker run`, `docker compose` or `Docker Swarm`. On the last two you can copy the configs, then run `docker-compose up -d` in the same directory as the `docker-compose.yml`\n\n> Netdata container requires different privileges and mounts to provide functionality similar to that provided by Netdata installed on the host. More info [here](https://learn.netdata.cloud/docs/installing/docker?_gl=1*f2xcnf*_ga*MTI1MTUwMzU0OS4xNjg2NjM1MDA1*_ga_J69Z2JCTFB*MTY5MDMxMDIyMS40MS4xLjE2OTAzMTAzNjkuNTguMC4w#create-a-new-netdata-agent-container)\n> Netdata will use the hostname from the container in which it is run instead of that of the host system. To change the default hostname check [here](https://learn.netdata.cloud/docs/agent/packaging/docker?_gl=1*i5weve*_ga*MTI1MTUwMzU0OS4xNjg2NjM1MDA1*_ga_J69Z2JCTFB*MTY5MDMxMjM4Ny40Mi4xLjE2OTAzMTIzOTAuNTcuMC4w#change-the-default-hostname)\n",methods:[{method:"Docker CLI",commands:[{channel:"nightly",command:"docker run -d --name=netdata \\\n--pid=host \\\n--network=host \\\n-v netdataconfig:/etc/netdata \\\n-v netdatalib:/var/lib/netdata \\\n-v netdatacache:/var/cache/netdata \\\n-v /:/host/root:ro,rslave \\\n-v /etc/passwd:/host/etc/passwd:ro \\\n-v /etc/group:/host/etc/group:ro \\\n-v /etc/localtime:/etc/localtime:ro \\\n-v /proc:/host/proc:ro \\\n-v /sys:/host/sys:ro \\\n-v /etc/os-release:/host/etc/os-release:ro \\\n-v /var/log:/host/var/log:ro \\\n-v /var/run/docker.sock:/var/run/docker.sock:ro \\\n-v /run/dbus:/run/dbus:ro \\\n--restart unless-stopped \\\n--cap-add SYS_PTRACE \\\n--cap-add SYS_ADMIN \\\n--security-opt apparmor=unconfined \\\n{% if $showClaimingOptions %}\n-e NETDATA_CLAIM_TOKEN={% claim_token %} \\\n-e NETDATA_CLAIM_URL={% claim_url %} \\\n-e NETDATA_CLAIM_ROOMS={% $claim_rooms %} \\\n{% /if %}\nnetdata/netdata:edge\n"},{channel:"stable",command:"docker run -d --name=netdata \\\n--pid=host \\\n--network=host \\\n-v netdataconfig:/etc/netdata \\\n-v netdatalib:/var/lib/netdata \\\n-v netdatacache:/var/cache/netdata \\\n-v /:/host/root:ro,rslave \\\n-v /etc/passwd:/host/etc/passwd:ro \\\n-v /etc/group:/host/etc/group:ro \\\n-v /etc/localtime:/etc/localtime:ro \\\n-v /proc:/host/proc:ro \\\n-v /sys:/host/sys:ro \\\n-v /etc/os-release:/host/etc/os-release:ro \\\n-v /var/log:/host/var/log:ro \\\n-v /var/run/docker.sock:/var/run/docker.sock:ro \\\n-v /run/dbus:/run/dbus:ro \\\n--restart unless-stopped \\\n--cap-add SYS_PTRACE \\\n--cap-add SYS_ADMIN \\\n--security-opt apparmor=unconfined \\\n{% if $showClaimingOptions %}\n-e NETDATA_CLAIM_TOKEN={% claim_token %} \\\n-e NETDATA_CLAIM_URL={% claim_url %} \\\n-e NETDATA_CLAIM_ROOMS={% $claim_rooms %} \\\n{% /if %}\nnetdata/netdata:stable\n"}]},{method:"Docker Compose",commands:[{channel:"nightly",command:"version: '3'\nservices:\n  netdata:\n    image: netdata/netdata:edge\n    container_name: netdata\n    pid: host\n    network_mode: host\n    restart: unless-stopped\n    cap_add:\n      - SYS_PTRACE\n      - SYS_ADMIN\n    security_opt:\n      - apparmor:unconfined\n    volumes:\n      - netdataconfig:/etc/netdata\n      - netdatalib:/var/lib/netdata\n      - netdatacache:/var/cache/netdata\n 
     - /:/host/root:ro,rslave\n      - /etc/passwd:/host/etc/passwd:ro\n      - /etc/group:/host/etc/group:ro\n      - /etc/localtime:/etc/localtime:ro\n      - /proc:/host/proc:ro\n      - /sys:/host/sys:ro\n      - /etc/os-release:/host/etc/os-release:ro\n      - /var/log:/host/var/log:ro\n      - /var/run/docker.sock:/var/run/docker.sock:ro\n      - /run/dbus:/run/dbus:ro\n{% if $showClaimingOptions %}\n    environment:\n      - NETDATA_CLAIM_TOKEN={% claim_token %}\n      - NETDATA_CLAIM_URL={% claim_url %}\n      - NETDATA_CLAIM_ROOMS={% $claim_rooms %}\n{% /if %}\nvolumes:\n  netdataconfig:\n  netdatalib:\n  netdatacache:\n"},{channel:"stable",command:"version: '3'\nservices:\n  netdata:\n    image: netdata/netdata:stable\n    container_name: netdata\n    pid: host\n    network_mode: host\n    restart: unless-stopped\n    cap_add:\n      - SYS_PTRACE\n      - SYS_ADMIN\n    security_opt:\n      - apparmor:unconfined\n    volumes:\n      - netdataconfig:/etc/netdata\n      - netdatalib:/var/lib/netdata\n      - netdatacache:/var/cache/netdata\n      - /:/host/root:ro,rslave\n      - /etc/passwd:/host/etc/passwd:ro\n      - /etc/group:/host/etc/group:ro\n      - /etc/localtime:/etc/localtime:ro\n      - /proc:/host/proc:ro\n      - /sys:/host/sys:ro\n      - /etc/os-release:/host/etc/os-release:ro\n      - /var/log:/host/var/log:ro\n      - /var/run/docker.sock:/var/run/docker.sock:ro\n      - /run/dbus:/run/dbus:ro\n{% if $showClaimingOptions %}\n    environment:\n      - NETDATA_CLAIM_TOKEN={% claim_token %}\n      - NETDATA_CLAIM_URL={% claim_url %}\n      - NETDATA_CLAIM_ROOMS={% $claim_rooms %}\n{% /if %}\nvolumes:\n  netdataconfig:\n  netdatalib:\n  netdatacache:\n"}]},{method:"Docker Swarm",commands:[{channel:"nightly",command:"version: '3'\nservices:\n  netdata:\n    image: netdata/netdata:edge\n    pid: host\n    network_mode: host\n    cap_add:\n      - SYS_PTRACE\n      - SYS_ADMIN\n    security_opt:\n      - apparmor:unconfined\n    volumes:\n      - netdataconfig:/etc/netdata\n      - netdatalib:/var/lib/netdata\n      - netdatacache:/var/cache/netdata\n      - /:/host/root:ro,rslave\n      - /etc/passwd:/host/etc/passwd:ro\n      - /etc/group:/host/etc/group:ro\n      - /etc/localtime:/etc/localtime:ro\n      - /proc:/host/proc:ro\n      - /sys:/host/sys:ro\n      - /etc/os-release:/host/etc/os-release:ro\n      - /etc/hostname:/etc/hostname:ro\n      - /var/log:/host/var/log:ro\n      - /var/run/docker.sock:/var/run/docker.sock:ro\n      - /run/dbus:/run/dbus:ro\n{% if $showClaimingOptions %}\n    environment:\n      - NETDATA_CLAIM_TOKEN={% claim_token %}\n      - NETDATA_CLAIM_URL={% claim_url %}\n      - NETDATA_CLAIM_ROOMS={% $claim_rooms %}\n{% /if %}\n    deploy:\n      mode: global\n      restart_policy:\n        condition: on-failure\nvolumes:\n  netdataconfig:\n  netdatalib:\n  netdatacache:\n"},{channel:"stable",command:"version: '3'\nservices:\n  netdata:\n    image: netdata/netdata:stable\n    pid: host\n    network_mode: host\n    cap_add:\n      - SYS_PTRACE\n      - SYS_ADMIN\n    security_opt:\n      - apparmor:unconfined\n    volumes:\n      - netdataconfig:/etc/netdata\n      - netdatalib:/var/lib/netdata\n      - netdatacache:/var/cache/netdata\n      - /:/host/root:ro,rslave\n      - /etc/passwd:/host/etc/passwd:ro\n      - /etc/group:/host/etc/group:ro\n      - /etc/localtime:/etc/localtime:ro\n      - /proc:/host/proc:ro\n      - /sys:/host/sys:ro\n      - /etc/os-release:/host/etc/os-release:ro\n      - /etc/hostname:/etc/hostname:ro\n      - 
/var/log:/host/var/log:ro\n      - /var/run/docker.sock:/var/run/docker.sock:ro\n      - /run/dbus:/run/dbus:ro\n{% if $showClaimingOptions %}\n    environment:\n      - NETDATA_CLAIM_TOKEN={% claim_token %}\n      - NETDATA_CLAIM_URL={% claim_url %}\n      - NETDATA_CLAIM_ROOMS={% $claim_rooms %}\n{% /if %}\n    deploy:\n      mode: global\n      restart_policy:\n        condition: on-failure\nvolumes:\n  netdataconfig:\n  netdatalib:\n  netdatacache:\n"}]}],additional_info:"",related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 19.03 or newer | Core | linux/i386, linux/amd64, linux/arm/v7, linux/arm64 |  |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n",quick_start:3,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-fedora",meta:{name:"Fedora",link:"https://www.fedoraproject.org/",categories:["deploy.operating-systems"],icon_filename:"fedora.png"},keywords:["linux"],install_description:"Run the following command on your node to install and connect Netdata to your Space:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 43 | Core | x86_64, aarch64 |  |\n| 42 | Core | x86_64, aarch64 |  |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-freebsd",meta:{name:"FreeBSD",link:"https://www.freebsd.org/",categories:["deploy.operating-systems"],icon_filename:"freebsd.svg"},keywords:["freebsd"],install_description:"## Install dependencies\nPlease install the following packages using 
the command below:\n\n```sh\npkg install bash e2fsprogs-libuuid git curl autoconf automake pkgconf pidof liblz4 libuv json-c cmake gmake\n```\n\nThis step requires root privileges. Answer affirmatively to any prompts during the installation process.\n\nRun the following command on your node to install and claim Netdata:\n",methods:[{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"fetch",commands:[{channel:"nightly",command:"fetch -o /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"fetch -o /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:"Netdata can also be installed via [FreeBSD ports](https://www.freshports.org/net-mgmt/netdata).\n",related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13-STABLE | Community |  |  |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n",quick_start:6,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-kubernetes",meta:{name:"Kubernetes (Helm)",link:"",categories:["deploy.docker-kubernetes"],icon_filename:"kubernetes.svg"},keywords:["kubernetes","container","Orchestrator"],install_description:"**Use helm install to install Netdata on your Kubernetes cluster**\nFor a new installation use `helm install` or for existing clusters add the content below to your `override.yaml` and then run `helm upgrade -f override.yaml netdata netdata/netdata`\n",methods:[{method:"Helm",commands:[{channel:"nightly",command:'helm repo add netdata https://netdata.github.io/helmchart/ && helm repo update\nhelm install netdata netdata/netdata \\\n--set image.tag=edge{% if $showClaimingOptions %} \\\n--set parent.claiming.enabled="true" \\\n--set parent.claiming.token={% claim_token %} \\\n--set parent.claiming.rooms={% $claim_rooms %} \\\n--set child.claiming.enabled="true" \\\n--set child.claiming.token={% claim_token %} \\\n--set child.claiming.rooms={% $claim_rooms %}{% /if %}\n'},{channel:"stable",command:'helm repo add netdata https://netdata.github.io/helmchart/ && helm repo update\nhelm install netdata netdata/netdata \\\n--set image.tag=stable{% if $showClaimingOptions %} \\\n--set parent.claiming.enabled="true" \\\n--set parent.claiming.token={% claim_token %} \\\n--set parent.claiming.rooms={% $claim_rooms %} \\\n--set child.claiming.enabled="true" \\\n--set child.claiming.token={% claim_token 
%} \\\n--set child.claiming.rooms={% $claim_rooms %}{% /if %}\n'}]},{method:"Existing Cluster",commands:[{channel:"nightly",command:"image:\n  tag: edge\n\nrestarter:\n  enabled: true\n{% if $showClaimingOptions %}\n\nparent:\n  claiming:\n    enabled: true\n    token: {% claim_token %}\n    rooms: {% $claim_rooms %}\n\nchild:\n  claiming:\n    enabled: true\n    token: {% claim_token %}\n    rooms: {% $claim_rooms %}\n{% /if %}\n"},{channel:"stable",command:"image:\n  tag: stable\n\nrestarter:\n  enabled: true\n{% if $showClaimingOptions %}\n\nparent:\n  claiming:\n    enabled: true\n    token: {% claim_token %}\n    rooms: {% $claim_rooms %}\n\nchild:\n  claiming:\n    enabled: true\n    token: {% claim_token %}\n    rooms: {% $claim_rooms %}\n{% /if %}\n"}]}],additional_info:"",related_resources:{},platform_info:"",quick_start:4,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-linux-generic",meta:{name:"Linux",link:"",categories:["deploy.operating-systems"],icon_filename:"linux.svg"},keywords:["linux"],install_description:"Run the following command on your node to install and connect Netdata to your Space:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n
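\nOnce the installer finishes, a quick way to confirm the agent came up (a sketch, assuming the default port) is:\n\n```bash\n# The agent answers on port 19999 by default\ncurl -s http://localhost:19999/api/v1/info\n```\n',related_resources:{},platform_info:"",quick_start:1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-macos",meta:{name:"macOS",link:"",categories:["deploy.operating-systems"],icon_filename:"macos.svg"},keywords:["macOS","mac","apple"],install_description:"Run the following command on your Intel-based macOS servers to install and claim Netdata:",methods:[{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh 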
/tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13 | Community |  |  |\n| 12 | Community |  |  |\n| 11 | Community |  |  |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n",quick_start:5,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-manjarolinux",meta:{name:"Manjaro Linux",link:"https://manjaro.org/",categories:["deploy.operating-systems"],icon_filename:"manjaro.svg"},keywords:["linux"],install_description:"Run the following command on your node to install and connect Netdata to your Space:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate |  |  |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-opensuse",meta:{name:"SUSE Linux",link:"https://www.suse.com/",categories:["deploy.operating-systems"],icon_filename:"openSUSE.svg"},keywords:["linux"],install_description:"Run the following command on your node to install and connect Netdata to your 
Space:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| tumbleweed | Core | x86_64, aarch64 |  |\n| 16.0 | Core | x86_64, aarch64 |  |\n| 15.6 | Core | x86_64, aarch64 |  |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-oraclelinux",meta:{name:"Oracle Linux",link:"https://www.oracle.com/linux/",categories:["deploy.operating-systems"],icon_filename:"oraclelinux.svg"},keywords:["linux"],install_description:"Run the following command on your node to install and connnect Netdata to your Space:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if 
%}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 10 | Core | x86_64, aarch64 |  |\n| 9 | Core | x86_64, aarch64 |  |\n| 8 | Core | x86_64, aarch64 |  |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-rhel",meta:{name:"Red Hat Enterprise Linux",link:"https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux",categories:["deploy.operating-systems"],icon_filename:"rhel.png"},keywords:["linux"],install_description:"Run the following command on your node to install and connnect Netdata to your Space:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 10.x | Core | x86_64, aarch64 |  |\n| 9.x | Core | x86_64, aarch64 |  |\n| 8.x | Core | x86_64, aarch64 |  |\n| 7.x | Core | x86_64 |  |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-rockylinux",meta:{name:"Rocky Linux",link:"https://rockylinux.org/",categories:["deploy.operating-systems"],icon_filename:"rocky.svg"},keywords:["linux"],install_description:"Run the following command on your node to install and connnect Netdata to your 
Space:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 10 | Core | x86_64, aarch64 |  |\n| 9 | Core | x86_64, aarch64 |  |\n| 8 | Core | x86_64, aarch64 |  |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-ubuntu",meta:{name:"Ubuntu",link:"https://ubuntu.com/",categories:["deploy.operating-systems"],icon_filename:"ubuntu.svg"},keywords:["linux"],install_description:"Run the following command on your node to install and connnect Netdata to your Space:",methods:[{method:"wget",commands:[{channel:"nightly",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]},{method:"curl",commands:[{channel:"nightly",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"},{channel:"stable",command:"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}],additional_info:'Did you know you 
can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId="deploy.docker-kubernetes" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId="deploy.docker-kubernetes" %}Docker{% /goToCategory %}?\n',related_resources:{},platform_info:"We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 24.04 | Core | amd64, armhf, arm64 |  |\n| 25.10 | Core | amd64, armhf, arm64 |  |\n| 22.04 | Core | amd64, armhf, arm64 |  |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n",quick_start:-1,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"deploy-windows",meta:{name:"Windows",link:"https://www.microsoft.com/en-us/windows",categories:["deploy.operating-systems"],icon_filename:"windows.svg"},keywords:["windows"],install_description:"Netdata offers a convenient Windows installer for easy setup. This executable provides two distinct installation modes, outlined below.\n\n## Graphical User Interface (GUI)\n\n**Download the MSI Installer**:\n  - [Stable version](https://github.com/netdata/netdata/releases/latest/download/netdata-x64.msi)\n  - [Nightly version](https://github.com/netdata/netdata-nightlies/releases/latest/download/netdata-x64.msi)\n\nDouble-click the installer to start the installation process. As Netdata adds a service to your system, you'll need to provide administrator privileges.\n\nOnce installed, you can access your Netdata dashboard at `localhost:19999`.\n\n## Silent Mode (Command line)\n\nIf you prefer to install Netdata through the command line, you can do so by running the following command on Windows Powershell with administrator rights.\n",methods:[{method:"Silent Mode (Command line)",commands:[{channel:"stable",command:"$ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest https://github.com/netdata/netdata/releases/latest/download/netdata-x64.msi -OutFile \"netdata-x64.msi\"; msiexec /qn /i netdata-x64.msi {% if $showClaimingOptions %}TOKEN={% claim_token %} ROOMS={% $claim_rooms %}{% /if %}\n"},{channel:"nightly",command:"$ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest https://github.com/netdata/netdata-nightlies/releases/latest/download/netdata-x64.msi -OutFile \"netdata-x64.msi\"; msiexec /qn /i netdata-x64.msi {% if $showClaimingOptions %}TOKEN={% claim_token %} ROOMS={% $claim_rooms %}{% /if %}\n"}]}],additional_info:"### Available CLI Options\n\n| Option       | Description                                                                                      |\n|--------------|--------------------------------------------------------------------------------------------------|\n| `/qn`        | Enables silent mode installation.                                                                |\n| `/i`         | Specifies the path to the MSI installer file.                                                    |\n| `INSECURE=1` | Forces insecure connections, bypassing hostname verification (use only if absolutely necessary). |\n| `TOKEN=`     | Sets the Claim Token for your Netdata Cloud Space.                                               |\n| `ROOMS=`     | Comma-separated list of Room IDs where you want your node to appear.                             |\n| `PROXY=`     | Sets the proxy server address if your network requires one.                                  
    |\n",related_resources:{},platform_info:"",quick_start:2,integration_type:"deploy",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"},{id:"export-appoptics",meta:{name:"AppOptics",link:"https://www.solarwinds.com/appoptics",categories:["export"],icon_filename:"solarwinds.svg",keywords:["app optics","AppOptics","Solarwinds"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# AppOptics\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. 
tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-aws-kinesis",meta:{name:"AWS Kinesis",link:"https://aws.amazon.com/kinesis/",categories:["export"],icon_filename:"aws-kinesis.svg"},keywords:["exporter","AWS","Kinesis"],overview:"# AWS Kinesis\n\nExport metrics to AWS Kinesis Data Streams\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- First [install](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) AWS SDK for C++\n- Here are the instructions when building from source, to ensure 3rd party dependencies are installed:\n  ```bash\n  git clone --recursive https://github.com/aws/aws-sdk-cpp.git\n  cd aws-sdk-cpp/\n  git submodule update --init --recursive\n  mkdir BUILT\n  cd BUILT\n  cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_ONLY=kinesis ..\n  make\n  make install\n  ```\n- `libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled.\n- Next, Netdata should be re-installed from the source. The installer will detect that the required libraries are now available.\n\n\n\n### Configuration\n\n#### Options\n\nNetdata automatically computes a partition key for every record with the purpose to distribute records across available shards evenly.\nThe following options can be defined for this exporter.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. 
(as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic configuration\n\n```yaml\n[kinesis:my_instance]\n    enabled = yes\n    destination = us-east-1\n\n```\n###### Configuration with AWS credentials\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
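(For the Kinesis connector configured below, that would presumably give a section name of `kinesis:https:my_instance`.) 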
For example: `remote_write:https:my_instance`.\n\n```yaml\n[kinesis:my_instance]\n    enabled = yes\n    destination = us-east-1\n    # AWS credentials\n    aws_access_key_id = your_access_key_id\n    aws_secret_access_key = your_secret_access_key\n    # destination stream\n    stream name = your_stream_name\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/aws_kinesis/metadata.yaml",troubleshooting:""},{id:"export-azure-data",meta:{name:"Azure Data Explorer",link:"https://azure.microsoft.com/en-us/pricing/details/data-explorer/",categories:["export"],icon_filename:"azuredataex.jpg",keywords:["Azure Data Explorer","Azure"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Azure Data Explorer\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
|  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
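For example, the system might expose a device-mapper disk under the ID `dm-0` while its name is the friendlier `vg0-lv_home` (illustrative values). 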
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-azure-event",meta:{name:"Azure Event Hub",link:"https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-about",categories:["export"],icon_filename:"azureeventhub.png",keywords:["Azure Event Hub","Azure"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Azure Event Hub\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending data to the external database, in seconds. 
| 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). 
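As a concrete illustration (hypothetical chart names), `send charts matching = !*reads apps.*` would keep `apps.cpu` but drop `apps.preads`. 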
There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-bigquery",meta:{name:"Google BigQuery",link:"https://cloud.google.com/bigquery/",categories:["export"],icon_filename:"bigquery.png",keywords:["export","Google BigQuery","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Google BigQuery\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. 
| no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. 
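For instance, with `update every = 10`, each agent effectively sends at a slightly different offset inside the 10-second window rather than all agents firing at the same instant. 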
This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-blueflood",meta:{name:"Blueflood",link:"http://blueflood.io/",categories:["export"],icon_filename:"blueflood.png",keywords:["export","Blueflood","graphite"]},keywords:["exporter","graphite","remote write","time series"],overview:"# Blueflood\n\nExport your Netdata metrics to Blueflood DB using the Graphite protocol, allowing you to archive your data for long-term storage, further analysis, or correlation with data from other sources.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). 
For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n    enabled = yes\n    destination = localhost:2003\n\n```\n###### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
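(For the Graphite connector configured below, that would presumably give a section name of `graphite:https:netdata`, as the detailed example at the end of this section uses.) 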
For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:netdata]\n    enabled = yes\n    destination = localhost:2003\n    username = my_username\n    password = my_password\n\n```\n###### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:https:netdata]\n    enabled = yes\n    username = my_username\n    password = my_password\n    destination = 10.10.1.114:2003\n    # data source = average\n    # prefix = netdata\n    # hostname = my_hostname\n    # update every = 10\n    # buffer on failures = 10\n    # timeout ms = 20000\n    # send names instead of ids = yes\n    # send charts matching = *\n    # send hosts matching = localhost *\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml",troubleshooting:""},{id:"export-chronix",meta:{name:"Chronix",link:"https://dbdb.io/db/chronix",categories:["export"],icon_filename:"chronix.png",keywords:["export","chronix","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Chronix\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). 
| localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-cortex",meta:{name:"Cortex",link:"https://cortexmetrics.io/",categories:["export"],icon_filename:"cortex.png",keywords:["export","cortex","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Cortex\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. 
| 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). 
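\nA minimal sketch of that ordering rule in `exporting.conf` (the instance name is illustrative):\n\n```yaml\n[prometheus_remote_write:my_instance]\n    # first match wins: charts ending in "reads" are rejected,\n    # then everything under apps.* is accepted\n    send charts matching = !*reads apps.*\n```\n\nReversed (`apps.* !*reads`), the positive pattern would match first and nothing would be excluded.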
There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-crate",meta:{name:"CrateDB",link:"https://crate.io/",categories:["export"],icon_filename:"crate.svg",keywords:["export","CrateDB","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# CrateDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. 
(as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-elastic",meta:{name:"ElasticSearch",link:"https://www.elastic.co/",categories:["export"],icon_filename:"elasticsearch.svg",keywords:["export","ElasticSearch","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# ElasticSearch\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
|  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-gnocchi",meta:{name:"Gnocchi",link:"https://wiki.openstack.org/wiki/Gnocchi",categories:["export"],icon_filename:"gnocchi.svg",keywords:["export","Gnocchi","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Gnocchi\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. 
| 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). 
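\nThe host filter described in the previous section, written out as a sketch in `exporting.conf` (the instance name is illustrative):\n\n```yaml\n[prometheus_remote_write:my_instance]\n    # hosts containing "child" are rejected first,\n    # then any remaining host containing "db" is accepted\n    send hosts matching = !*child* *db*\n```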
There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-google-pubsub",meta:{name:"Google Cloud Pub Sub",link:"https://cloud.google.com/pubsub",categories:["export"],icon_filename:"pubsub.png"},keywords:["exporter","Google Cloud","Pub Sub"],overview:"# Google Cloud Pub Sub\n\nExport metrics to the Google Cloud Pub/Sub service.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- First, [install](https://github.com/googleapis/google-cloud-cpp/) the Google Cloud Platform C++ Client Libraries.\n- Pub/Sub support also depends on the dependencies of those libraries, such as `protobuf`, `protoc`, and `grpc`.\n- Next, Netdata should be reinstalled from source. The installer will detect that the required libraries are now available.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | pubsub.googleapis.com | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. 
(as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = pubsub.googleapis.com\n  ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! 
gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Basic configuration\n\n- Set the destination option to a Pub/Sub service endpoint. pubsub.googleapis.com is the default one.\n- Create the credentials JSON file by following Google Cloud\'s authentication guide.\n- The user running the Agent (typically netdata) needs read access to google_cloud_credentials.json, which you can set\n  `chmod 400 google_cloud_credentials.json; chown netdata google_cloud_credentials.json`\n- Set the credentials file option to the full path of the file.\n\n\n```yaml\n[pubsub:my_instance]\n    enabled = yes\n    destination = pubsub.googleapis.com\n    credentials file = /etc/netdata/google_cloud_credentials.json\n    project id = my_project\n    topic id = my_topic\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/pubsub/metadata.yaml",troubleshooting:""},{id:"export-graphite",meta:{name:"Graphite",link:"https://graphite.readthedocs.io/en/latest/",categories:["export"],icon_filename:"graphite.png"},keywords:["exporter","graphite","remote write","time series"],overview:"# Graphite\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. 
(as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic configuration.\n\n```yaml\n[graphite:netdata]\n    enabled = yes\n    destination = localhost:2003\n\n```\n###### Configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n    enabled = yes\n    destination = localhost:2003\n    username = my_username\n    password = my_password\n\n```\n###### Detailed Configuration for a remote, secure host\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n    enabled = yes\n    username = my_username\n    password = my_password\n    destination = 10.10.1.114:2003\n    # data source = average\n    # prefix = netdata\n    # hostname = my_hostname\n    # update every = 10\n    # buffer on failures = 10\n    # timeout ms = 20000\n    # send names instead of ids = yes\n    # send charts matching = *\n    # send hosts matching = localhost *\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml",troubleshooting:""},{id:"export-greptimedb",meta:{name:"GreptimeDB",link:"https://greptime.com/product/db",categories:["export"],icon_filename:"greptimedb.svg",keywords:["export","GreptimeDB","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# GreptimeDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls whether Netdata sends metric names or metric IDs to the external database (yes/no). 
|  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-irondb",meta:{name:"IRONdb",link:"https://docs.circonus.com/irondb/",categories:["export"],icon_filename:"irondb.png",keywords:["export","IRONdb","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# IRONdb\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. 
| 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). 
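\nPutting the two filters together, a hedged sketch in `exporting.conf` (the hostnames and instance name are made up):\n\n```yaml\n[prometheus_remote_write:my_instance]\n    # keep the local host and any host containing "db", but drop "*staging*" children\n    send hosts matching = !*staging* localhost *db*\n    # keep apps.* charts except those ending in "reads"\n    send charts matching = !*reads apps.*\n```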
There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-json",meta:{name:"JSON",link:"",categories:["export"],icon_filename:"json.svg"},keywords:["exporter","json"],overview:"# JSON\n\nUse the JSON connector for the exporting engine to archive your Agent's metrics to JSON document databases for long-term storage,\nfurther analysis, or correlation with data from other sources\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | pubsub.googleapis.com | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. 
| 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = localhost:5448\n  ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server has not received the data after that many failed iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the local host is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (the order is important: the first pattern that matches the hostname wins, positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (the order is important: the first pattern that matches the chart id or the chart name wins,\npositive or negative). There is also a `filter` URL parameter that can be used while querying `allmetrics`. 
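\nFor example, a sketch of such a query against a local Agent (the host, port, and chart pattern are placeholders):\n\n```bash\ncurl 'http://localhost:19999/api/v1/allmetrics?format=json&filter=apps.*'\n```\n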
The URL parameter\ntakes precedence over the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they\ndiffer: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Basic configuration\n\n\n\n```yaml\n[json:my_json_instance]\n    enabled = yes\n    destination = localhost:5448\n\n```\n###### Configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `json:https:my_json_instance`.\n\n```yaml\n[json:https:my_json_instance]\n    enabled = yes\n    destination = localhost:5448\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/json/metadata.yaml",troubleshooting:""},{id:"export-kafka",meta:{name:"Kafka",link:"https://kafka.apache.org/",categories:["export"],icon_filename:"kafka.svg",keywords:["export","Kafka","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Kafka\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. 
| netdata | no |\n| [update every](#option-update-every) | Frequency of sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server has not received the data after that many failed iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the local host is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (the order is important: the first pattern that matches the hostname wins, positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. 
So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (the order is important: the first pattern that matches the chart id or the chart name wins,\npositive or negative). There is also a `filter` URL parameter that can be used while querying `allmetrics`. The URL parameter\ntakes precedence over the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they\ndiffer: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-kairosdb",meta:{name:"KairosDB",link:"https://kairosdb.github.io/",categories:["export"],icon_filename:"kairos.png",keywords:["KairosDB","kairos","export","graphite"]},keywords:["exporter","graphite","remote write","time series"],overview:"# KairosDB\n\nExport your Netdata metrics to KairosDB using the Graphite protocol, allowing you to archive your data for long-term storage, further analysis, or correlation with data from other sources.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. 
(as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server has not received the data after that many failed iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the local host is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (the order is important: the first pattern that matches the hostname wins, positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (the order is important: the first pattern that matches the chart id or the chart name wins,\npositive or negative). There is also a `filter` URL parameter that can be used while querying `allmetrics`. The URL parameter\ntakes precedence over the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they\ndiffer: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic configuration.\n\n```yaml\n[graphite:netdata]\n    enabled = yes\n    destination = localhost:2003\n\n```\n###### Configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n    enabled = yes\n    destination = localhost:2003\n    username = my_username\n    password = my_password\n\n```\n###### Detailed Configuration for a remote, secure host\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n    enabled = yes\n    username = my_username\n    password = my_password\n    destination = 10.10.1.114:2003\n    # data source = average\n    # prefix = netdata\n    # hostname = my_hostname\n    # update every = 10\n    # buffer on failures = 10\n    # timeout ms = 20000\n    # send names instead of ids = yes\n    # send charts matching = *\n    # send hosts matching = localhost *\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml",troubleshooting:""},{id:"export-m3db",meta:{name:"M3DB",link:"https://m3db.io/",categories:["export"],icon_filename:"m3db.png",keywords:["export","M3DB","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# M3DB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). 
|  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server has not received the data after that many failed iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the local host is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (the order is important: the first pattern that matches the hostname wins, positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (the order is important: the first pattern that matches the chart id or the chart name wins,\npositive or negative). There is also a `filter` URL parameter that can be used while querying `allmetrics`. The URL parameter\ntakes precedence over the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). 
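\nA hypothetical sketch of turning this on in a connector section (the instance name is illustrative):\n\n```yaml\n[prometheus_remote_write:my_instance]\n    send names instead of ids = yes\n```\n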
Most charts and metrics have the same ID and name, but in several cases they\ndiffer: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-metricfire",meta:{name:"MetricFire",link:"https://www.metricfire.com/",categories:["export"],icon_filename:"metricfire.png",keywords:["export","MetricFire","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# MetricFire\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending data to the external database, in seconds. 
| 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server has not received the data after that many failed iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the local host is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (the order is important: the first pattern that matches the hostname wins, positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (the order is important: the first pattern that matches the chart id or the chart name wins,\npositive or negative). 
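\nPutting the two filters together, a hypothetical central Netdata that exports only database hosts and application charts might use (a sketch; all names are illustrative):\n\n```yaml\n[prometheus_remote_write:my_instance]\n    send hosts matching = !*child* *db*\n    send charts matching = !*reads apps.*\n```\n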
There is also a `filter` URL parameter that can be used while querying `allmetrics`. The URL parameter\ntakes precedence over the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they\ndiffer: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-mongodb",meta:{name:"MongoDB",link:"https://www.mongodb.com/",categories:["export"],icon_filename:"mongodb.svg"},keywords:["exporter","MongoDB"],overview:"# MongoDB\n\nUse the MongoDB connector for the exporting engine to archive your Agent's metrics to a MongoDB database\nfor long-term storage, further analysis, or correlation with data from other sources.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n- To use MongoDB as an external storage for long-term archiving, you should first [install](https://www.mongodb.com/docs/languages/c/c-driver/current/libmongoc/tutorials/obtaining-libraries/installing/#std-label-installing) libmongoc 1.7.0 or higher.\n- Next, re-install Netdata from source, so the build detects that the required library is now available.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | localhost | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. 
(as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:27017 10.11.14.3:4242 10.11.14.4:27017\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server has not received the data after that many failed iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the local host is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (the order is important: the first pattern that matches the hostname wins, positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (the order is important: the first pattern that matches the chart id or the chart name wins,\npositive or negative). There is also a `filter` URL parameter that can be used while querying `allmetrics`. The URL parameter\ntakes precedence over the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they\ndiffer: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Basic configuration\n\nThe default socket timeout depends on the exporting connector update interval.\nThe timeout is 500 ms shorter than the interval (but not less than 1000 ms). You can alter the timeout using the `sockettimeoutms` MongoDB URI option.\n\n\n```yaml\n[mongodb:my_instance]\n    enabled = yes\n    destination = mongodb://<hostname>\n    database = your_database_name\n    collection = your_collection_name\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/mongodb/metadata.yaml",troubleshooting:""},{id:"export-newrelic",meta:{name:"New Relic",link:"https://newrelic.com/",categories:["export"],icon_filename:"newrelic.svg",keywords:["export","NewRelic","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# New Relic\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. 
Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. 
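\nFor example, a hypothetical instance that batches a minute of data per send (sketch; instance name illustrative):\n\n```yaml\n[prometheus_remote_write:my_instance]\n    update every = 60\n```\n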
This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server has not received the data after that many failed iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the local host is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (the order is important: the first pattern that matches the hostname wins, positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (the order is important: the first pattern that matches the chart id or the chart name wins,\npositive or negative). There is also a `filter` URL parameter that can be used while querying `allmetrics`. The URL parameter\ntakes precedence over the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they\ndiffer: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-opensearch",meta:{name:"OpenSearch",link:"https://opensearch.org/",categories:["export"],icon_filename:"opensearch.svg",keywords:["export","OpenSearch","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# OpenSearch\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space-separated patterns (use * as a wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). 
|  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server has not received the data after that many failed iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as a wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the local host is always checked as `localhost`), allowing you to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (the order is important: the first pattern that matches the hostname wins, positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (the order is important: the first pattern that matches the chart id or the chart name wins,\npositive or negative). There is also a `filter` URL parameter that can be used while querying `allmetrics`. The URL parameter\ntakes precedence over the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system, and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they\ndiffer: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-opentsdb",meta:{name:"OpenTSDB",link:"https://github.com/OpenTSDB/opentsdb",categories:["export"],icon_filename:"opentsdb.png"},keywords:["exporter","OpenTSDB","scalable time series"],overview:"# OpenTSDB\n\nUse the OpenTSDB connector for the exporting engine to archive your Netdata metrics to OpenTSDB databases for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n- OpenTSDB and Netdata, installed, configured and operational.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 2 * update_every * 1000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to OpenTSDB. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used (opentsdb = 4242).\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:4242 10.11.12.1:4242\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using `*` as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the local host is always checked as `localhost`), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.
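\n\nFor instance, a hypothetical query against a local Agent (the default port 19999 is assumed) that limits the output to the `apps.*` charts:\n\n```bash\ncurl 'http://localhost:19999/api/v1/allmetrics?format=prometheus&filter=apps.*'\n```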
\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Minimal configuration\n\nAdd the `:http` or `:https` modifier to the connector type if you need to use a protocol other than plaintext.\nFor example: `opentsdb:http:my_opentsdb_instance`, `opentsdb:https:my_opentsdb_instance`.\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n    enabled = yes\n    destination = localhost:4242\n\n```\n###### HTTP authentication\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n    enabled = yes\n    destination = localhost:4242\n    username = my_username\n    password = my_password\n\n```\n###### Using `send hosts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n    enabled = yes\n    destination = localhost:4242\n    send hosts matching = localhost *\n\n```\n###### Using `send charts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n    enabled = yes\n    destination = localhost:4242\n    send charts matching = *\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/opentsdb/metadata.yaml",troubleshooting:""},{id:"export-pgsql",meta:{name:"PostgreSQL",link:"https://www.postgresql.org/",categories:["export"],icon_filename:"postgres.svg",keywords:["export","PostgreSQL","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# PostgreSQL\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. 
| no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. 
This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-prometheus-remote",meta:{name:"Prometheus Remote Write",link:"https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage",categories:["export"],icon_filename:"prometheus.svg"},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Prometheus Remote Write\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
|  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-quasar",meta:{name:"QuasarDB",link:"https://doc.quasar.ai/master/",categories:["export"],icon_filename:"quasar.jpeg",keywords:["export","quasar","quasarDB","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# QuasarDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. 
| 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using `*` as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the local host is always checked as `localhost`), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.
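\n\nAs a sketch, a hypothetical instance that exports only the `apps.*` charts while excluding those ending in `*reads`:\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    send charts matching = !*reads apps.*\n```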
\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-splunk",meta:{name:"Splunk SignalFx",link:"https://www.splunk.com/en_us/products/observability.html",categories:["export"],icon_filename:"splunk.svg",keywords:["export","splunk","signalfx","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Splunk SignalFx\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. 
| no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. 
This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-thanos",meta:{name:"Thanos",link:"https://thanos.io/",categories:["export"],icon_filename:"thanos.png",keywords:["export","thanos","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Thanos\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
|  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-tikv",meta:{name:"TiKV",link:"https://tikv.org/",categories:["export"],icon_filename:"tikv.png",keywords:["export","TiKV","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# TiKV\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending sending data to the external database, in seconds. 
| 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). 
There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-timescaledb",meta:{name:"TimescaleDB",link:"https://www.timescale.com/",categories:["export"],icon_filename:"timescale.png",keywords:["export","TimescaleDB","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# TimescaleDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. 
(as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using `*` as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the local host is always checked as `localhost`), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
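\n\nFor example, a minimal sketch of a central Netdata that exports itself and its `*db*` hosts while skipping any `*child*` hosts (the instance name `my_instance` is illustrative):\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    send hosts matching = localhost !*child* *db*\n```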
\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-victoria",meta:{name:"VictoriaMetrics",link:"https://victoriametrics.com/products/open-source/",categories:["export"],icon_filename:"victoriametrics.png",keywords:["export","victoriametrics","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# VictoriaMetrics\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
|  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default, and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
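\n\nFor instance, a minimal sketch (again borrowing the illustrative `prometheus_remote_write:my_instance` section name from the examples below):\n\n```yaml\n[prometheus_remote_write:my_instance]\n    # prefer human-friendly chart/dimension names over raw system ids (yes/no)\n    send names instead of ids = yes\n```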
\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-vmware",meta:{name:"VMware Aria",link:"https://www.vmware.com/products/aria-operations-for-applications.html",categories:["export"],icon_filename:"aria.png",keywords:["export","VMware","Aria","Tanzu","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# VMware Aria\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending data to the external database, in seconds. 
| 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default, and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.
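\n\nA sketch of that URL parameter, assuming a local Agent listening on the default port 19999 and the Prometheus text format:\n\n```bash\ncurl 'http://localhost:19999/api/v1/allmetrics?format=prometheus&filter=apps.*'\n```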
\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"export-wavefront",meta:{name:"Wavefront",link:"https://docs.wavefront.com/wavefront_data_ingestion.html",categories:["export"],icon_filename:"wavefront.png",keywords:["export","Wavefront","prometheus","remote write"]},keywords:["exporter","Prometheus","remote write","time series"],overview:"# Wavefront\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details open=true summary="Config options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| [destination](#option-destination) | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. 
| no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) |  | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| [update every](#option-update-every) | Frequency of sending data to the external database, in seconds. | 10 | no |\n| [buffer on failures](#option-buffer-on-failures) | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| [send hosts matching](#option-send-hosts-matching) | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| [send charts matching](#option-send-charts-matching) | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| [send names instead of ids](#option-send-names-instead-of-ids) | Controls the metric names Netdata should send to the external database (yes/no). |  | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). |  | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). |  | no |\n\n<a id="option-destination"></a>\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default, and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n  ```yaml\n  destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n  ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n<a id="option-update-every"></a>\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. 
This randomness does not affect the quality of the data, only the time they are sent.\n\n\n<a id="option-buffer-on-failures"></a>\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n<a id="option-send-hosts-matching"></a>\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n<a id="option-send-charts-matching"></a>\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n<a id="option-send-names-instead-of-ids"></a>\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n\n##### Examples\n\n###### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n###### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n',integration_type:"exporter",edit_link:"https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml",troubleshooting:""},{id:"notify-alerta",meta:{name:"Alerta",link:"https://alerta.io/",categories:["notify.agent"],icon_filename:"alerta.png"},keywords:["Alerta"],overview:"# Alerta\n\nThe [Alerta](https://alerta.io/) monitoring system is a tool used to consolidate and de-duplicate alerts from multiple sources for quick \u2018at-a-glance\u2019 visualization. With just one system you can monitor alerts from many other monitoring tools on a single screen.\nYou can send Netdata alerts to Alerta to see alerts coming from many Netdata hosts or also from a multi-host Netdata configuration.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- A working Alerta instance\n- An Alerta API key (if authentication in Alerta is enabled)\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_ALERTA | Set `SEND_ALERTA` to YES |  | yes |\n| ALERTA_WEBHOOK_URL | set `ALERTA_WEBHOOK_URL` to the API url you defined when you installed the Alerta server. |  | yes |\n| [ALERTA_API_KEY](#option-alerta-api-key) | Set `ALERTA_API_KEY` to your API key. |  | yes |\n| DEFAULT_RECIPIENT_ALERTA | Set `DEFAULT_RECIPIENT_ALERTA` to the default recipient environment you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. |  | yes |\n| [DEFAULT_RECIPIENT_CUSTOM](#option-default-recipient-custom) | Set different recipient environments per role, by editing `DEFAULT_RECIPIENT_CUSTOM` with the environment name of your choice |  | no |\n\n<a id="option-alerta-api-key"></a>\n##### ALERTA_API_KEY\n\nYou will need an API key to send messages from any source, if Alerta is configured to use authentication (recommended). To create a new API key:\n1. Go to Configuration > API Keys.\n2. 
Create a new API key called "netdata" with `write:alerts` permission.\n\n\n<a id="option-default-recipient-custom"></a>\n##### DEFAULT_RECIPIENT_CUSTOM\n\nThe `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_alerta[sysadmin]="Systems"\nrole_recipients_alerta[domainadmin]="Domains"\nrole_recipients_alerta[dba]="Databases Systems"\nrole_recipients_alerta[webmaster]="Marketing Development"\nrole_recipients_alerta[proxyadmin]="Proxy"\nrole_recipients_alerta[sitemgr]="Sites"\n```\n\nThe values you provide should be defined as environments in `/etc/alertad.conf` with the `ALLOWED_ENVIRONMENTS` option.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# alerta (alerta.io) global notification options\n\nSEND_ALERTA="YES"\nALERTA_WEBHOOK_URL="http://yourserver/alerta/api"\nALERTA_API_KEY="INSERT_YOUR_API_KEY_HERE"\nDEFAULT_RECIPIENT_ALERTA="Production"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/alerta/metadata.yaml"},{id:"notify-awssns",meta:{name:"AWS SNS",link:"https://aws.amazon.com/sns/",categories:["notify.agent"],icon_filename:"aws.svg"},keywords:["AWS SNS"],overview:"# AWS SNS\n\nAs part of its AWS suite, Amazon provides a notification broker service called 'Simple Notification Service' (SNS). Amazon SNS works similarly to Netdata's own notification system, allowing you to dispatch a single notification to multiple subscribers of different types. 
Among other things, SNS supports sending notifications to:\n- Email addresses\n- Mobile Phones via SMS\n- HTTP or HTTPS webhooks\n- AWS Lambda functions\n- AWS SQS queues\n- Mobile applications via push notifications\nYou can send notifications through Amazon SNS using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- While Amazon SNS supports sending differently formatted messages for different delivery methods, Netdata does not currently support this functionality.\n- For email notification support, we recommend using Netdata's email notifications, as it has the following benefits:\n  - In most cases, it requires less configuration.\n  - Netdata's emails are nicely pre-formatted and support features like threading, which requires a lot of manual effort in SNS.\n  - It is less resource intensive and more cost-efficient than SNS.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- The [Amazon Web Services CLI tools](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) (awscli).\n- An actual home directory for the user you run Netdata as, instead of just using `/` as a home directory. The setup depends on the distribution, but `/var/lib/netdata` is the recommended directory. If you are using Netdata as a dedicated user, the permissions will already be correct.\n- An Amazon SNS topic to send notifications to with one or more subscribers. The Getting Started section of the Amazon SNS documentation covers the basics of how to set this up. Make note of the Topic ARN when you create the topic.\n- While not mandatory, it is highly recommended to create a dedicated IAM user on your account for Netdata to send notifications. This user needs to have programmatic access, and should only allow access to SNS. For an additional layer of security, you can create one for each system or group of systems.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| aws path | The full path of the aws command. If empty, the system `$PATH` will be searched for it. If not found, Amazon SNS notifications will be silently disabled. |  | yes |\n| SEND_AWSSNS | Set `SEND_AWSSNS` to YES | YES | yes |\n| [AWSSNS_MESSAGE_FORMAT](#option-awssns-message-format) | Set `AWSSNS_MESSAGE_FORMAT` to the format string you want alerts to be sent with. | ${status} on ${host} at ${date}: ${chart} ${value_string} | yes |\n| [DEFAULT_RECIPIENT_AWSSNS](#option-default-recipient-awssns) | Set `DEFAULT_RECIPIENT_AWSSNS` to the Topic ARN you noted down upon creating the Topic. 
|  | yes |\n\n<a id="option-awssns-message-format"></a>\n##### AWSSNS_MESSAGE_FORMAT\n\nThe supported variables are:\n\n| Variable name               | Description                                                                      |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}`                  | Like "name = value units"                                                        |\n| `${status_message}`         | Like "needs attention", "recovered", "is critical"                               |\n| `${severity}`               | Like "Escalated to CRITICAL", "Recovered from WARNING"                           |\n| `${raised_for}`             | Like "(alarm was raised for 10 minutes)"                                         |\n| `${host}`                   | The host generated this event                                                    |\n| `${url_host}`               | Same as ${host} but URL encoded                                                  |\n| `${unique_id}`              | The unique id of this event                                                      |\n| `${alarm_id}`               | The unique id of the alarm that generated this event                             |\n| `${event_id}`               | The incremental id of the event, for this alarm id                               |\n| `${when}`                   | The timestamp this event occurred                                                |\n| `${name}`                   | The name of the alarm, as given in netdata health.d entries                      |\n| `${url_name}`               | Same as ${name} but URL encoded                                                  |\n| `${chart}`                  | The name of the chart (type.id)                                                  |\n| `${url_chart}`              | Same as ${chart} but URL encoded                                                 |\n| `${status}`                 | The current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}`             | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}`                  | The current value of the alarm                                                   |\n| `${old_value}`              | The previous value of the alarm                                                  |\n| `${src}`                    | The line number and file the alarm has been configured                           |\n| `${duration}`               | The duration in seconds of the previous alarm state                              |\n| `${duration_txt}`           | Same as ${duration} for humans                                                   |\n| `${non_clear_duration}`     | The total duration in seconds this is/was non-clear                              |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans                                         |\n| `${units}`                  | The units of the value                                                           |\n| `${info}`                   | A short description of the alarm                                                 |\n| `${value_string}`           | Friendly value (with units)                                                      |\n| `${old_value_string}`       | Friendly old value (with units)                                                  |\n| `${image}`                  | The URL of an image to represent the 
status of the alarm                         |\n| `${color}`                  | A color in AABBCC format for the alarm                                           |\n| `${goto_url}`               | The URL the user can click to see the netdata dashboard                          |\n| `${calc_expression}`        | The expression evaluated to provide the value for the alarm                      |\n| `${calc_param_values}`      | The value of the variables in the evaluated expression                           |\n| `${total_warnings}`         | The total number of alarms in WARNING state on the host                          |\n| `${total_critical}`         | The total number of alarms in CRITICAL state on the host                         |\n\n\n<a id="option-default-recipient-awssns"></a>\n##### DEFAULT_RECIPIENT_AWSSNS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different recipient Topics per **role**, by editing `DEFAULT_RECIPIENT_AWSSNS` with the Topic ARN you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_awssns[sysadmin]="arn:aws:sns:us-east-2:123456789012:Systems"\nrole_recipients_awssns[domainadmin]="arn:aws:sns:us-east-2:123456789012:Domains"\nrole_recipients_awssns[dba]="arn:aws:sns:us-east-2:123456789012:Databases"\nrole_recipients_awssns[webmaster]="arn:aws:sns:us-east-2:123456789012:Development"\nrole_recipients_awssns[proxyadmin]="arn:aws:sns:us-east-2:123456789012:Proxy"\nrole_recipients_awssns[sitemgr]="arn:aws:sns:us-east-2:123456789012:Sites"\n```\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\nAn example working configuration would be:\n\n```yaml\n#------------------------------------------------------------------------------\n# Amazon SNS notifications\n\nSEND_AWSSNS="YES"\nAWSSNS_MESSAGE_FORMAT="${status} on ${host} at ${date}: ${chart} ${value_string}"\nDEFAULT_RECIPIENT_AWSSNS="arn:aws:sns:us-east-2:123456789012:MyTopic"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/awssns/metadata.yaml"},{id:"notify-custom",meta:{name:"Custom",link:"",categories:["notify.agent"],icon_filename:"custom.png"},keywords:["custom"],overview:"# Custom\n\nNetdata Agent's alert notification feature allows you to send custom notifications to any endpoint you choose.\n\n",setup:'## Setup\n\n\n### 
Prerequisites\n\n#### \n\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_CUSTOM | Set `SEND_CUSTOM` to YES | YES | yes |\n| [DEFAULT_RECIPIENT_CUSTOM](#option-default-recipient-custom) | This value is dependent on how you handle the `${to}` variable inside the `custom_sender()` function. |  | yes |\n| [custom_sender()](#option-custom-sender) | You can look at the other senders in `/usr/libexec/netdata/plugins.d/alarm-notify.sh` for examples of how to modify the function in this configuration file. |  | no |\n\n<a id="option-default-recipient-custom"></a>\n##### DEFAULT_RECIPIENT_CUSTOM\n\nAll roles will default to this variable if left unconfigured. You can edit `DEFAULT_RECIPIENT_CUSTOM` with the variable you want, in the following entries at the bottom of the same file:\n```\nrole_recipients_custom[sysadmin]="systems"\nrole_recipients_custom[domainadmin]="domains"\nrole_recipients_custom[dba]="databases systems"\nrole_recipients_custom[webmaster]="marketing development"\nrole_recipients_custom[proxyadmin]="proxy-admin"\nrole_recipients_custom[sitemgr]="sites"\n```\n\n\n<a id="option-custom-sender"></a>\n##### custom_sender()\n\nThe following is a sample custom_sender() function in health_alarm_notify.conf, to send an SMS via an imaginary HTTPS endpoint to the SMS gateway:\n```\ncustom_sender() {\n    # example human readable SMS\n    local msg="${host} ${status_message}: ${alarm} ${raised_for}"\n\n    # limit it to 160 characters and encode it for use in a URL\n    urlencode "${msg:0:160}" >/dev/null; msg="${REPLY}"\n\n    # a space separated list of the recipients to send alarms to\n    to="${1}"\n\n    for phone in ${to}; do\n      httpcode=$(docurl -X POST \\\n            --data-urlencode "From=XXX" \\\n            --data-urlencode "To=${phone}" \\\n            --data-urlencode "Body=${msg}" \\\n            -u "${accountsid}:${accounttoken}" \\\n        https://domain.website.com/)\n\n      if [ "${httpcode}" = "200" ]; then\n        info "sent custom notification ${msg} to ${phone}"\n        sent=$((sent + 1))\n      else\n        error "failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}."\n      fi\n    done\n}\n```\n\nThe supported variables that you can use for the function\'s `msg` variable are:\n\n| Variable name               | Description                                                                      |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}`                  | Like "name = value units"                                                        |\n| `${status_message}`         | Like "needs attention", "recovered", "is critical"                               |\n| `${severity}`               | Like "Escalated to CRITICAL", "Recovered from WARNING"                           |\n| `${raised_for}`             | Like "(alarm was raised for 10 minutes)"                                         |\n| `${host}`                   | The host generated this event                                                    |\n| `${url_host}`               | Same as ${host} but URL encoded                                                  |\n| `${unique_id}`              | The unique id of this event               
                                       |\n| `${alarm_id}`               | The unique id of the alarm that generated this event                             |\n| `${event_id}`               | The incremental id of the event, for this alarm id                               |\n| `${when}`                   | The timestamp this event occurred                                                |\n| `${name}`                   | The name of the alarm, as given in netdata health.d entries                      |\n| `${url_name}`               | Same as ${name} but URL encoded                                                  |\n| `${chart}`                  | The name of the chart (type.id)                                                  |\n| `${url_chart}`              | Same as ${chart} but URL encoded                                                 |\n| `${status}`                 | The current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}`             | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}`                  | The current value of the alarm                                                   |\n| `${old_value}`              | The previous value of the alarm                                                  |\n| `${src}`                    | The line number and file the alarm has been configured                           |\n| `${duration}`               | The duration in seconds of the previous alarm state                              |\n| `${duration_txt}`           | Same as ${duration} for humans                                                   |\n| `${non_clear_duration}`     | The total duration in seconds this is/was non-clear                              |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans                                         |\n| `${units}`                  | The units of the value                                                           |\n| `${info}`                   | A short description of the alarm                                                 |\n| `${value_string}`           | Friendly value (with units)                                                      |\n| `${old_value_string}`       | Friendly old value (with units)                                                  |\n| `${image}`                  | The URL of an image to represent the status of the alarm                         |\n| `${color}`                  | A color in  AABBCC format for the alarm                                          |\n| `${goto_url}`               | The URL the user can click to see the netdata dashboard                          |\n| `${calc_expression}`        | The expression evaluated to provide the value for the alarm                      |\n| `${calc_param_values}`      | The value of the variables in the evaluated expression                           |\n| `${total_warnings}`         | The total number of alarms in WARNING state on the host                          |\n| `${total_critical}`         | The total number of alarms in CRITICAL state on the host                         |\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config 
directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# custom notifications\n\nSEND_CUSTOM="YES"\nDEFAULT_RECIPIENT_CUSTOM=""\n\n# The custom_sender() is a custom function to do whatever you need to do\ncustom_sender() {\n    # example human readable SMS\n    local msg="${host} ${status_message}: ${alarm} ${raised_for}"\n\n    # limit it to 160 characters and encode it for use in a URL\n    urlencode "${msg:0:160}" >/dev/null; msg="${REPLY}"\n\n    # a space separated list of the recipients to send alarms to\n    to="${1}"\n\n    for phone in ${to}; do\n      httpcode=$(docurl -X POST \\\n            --data-urlencode "From=XXX" \\\n            --data-urlencode "To=${phone}" \\\n            --data-urlencode "Body=${msg}" \\\n            -u "${accountsid}:${accounttoken}" \\\n        https://domain.website.com/)\n\n      if [ "${httpcode}" = "200" ]; then\n        info "sent custom notification ${msg} to ${phone}"\n        sent=$((sent + 1))\n      else\n        error "failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}."\n      fi\n    done\n}\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/custom/metadata.yaml"},{id:"notify-discord",meta:{name:"Discord",link:"https://discord.com/",categories:["notify.agent"],icon_filename:"discord.png"},keywords:["Discord"],overview:"# Discord\n\nSend notifications to Discord using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Discord. Create a webhook by following the official [Discord documentation](https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks). You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more Discord channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_DISCORD | Set `SEND_DISCORD` to YES | YES | yes |\n| DISCORD_WEBHOOK_URL | set `DISCORD_WEBHOOK_URL` to your webhook URL. |  | yes |\n| [DEFAULT_RECIPIENT_DISCORD](#option-default-recipient-discord) | Set `DEFAULT_RECIPIENT_DISCORD` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. 
|  | yes |\n\n<a id="option-default-recipient-discord"></a>\n##### DEFAULT_RECIPIENT_DISCORD\n\nAll roles will default to this variable if left unconfigured.\nYou can then have different channels per role, by editing `DEFAULT_RECIPIENT_DISCORD` with the channel you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_discord[sysadmin]="systems"\nrole_recipients_discord[domainadmin]="domains"\nrole_recipients_discord[dba]="databases systems"\nrole_recipients_discord[webmaster]="marketing development"\nrole_recipients_discord[proxyadmin]="proxy-admin"\nrole_recipients_discord[sitemgr]="sites"\n```\n\nThe values you provide should already exist as Discord channels in your server.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# discord (discordapp.com) global notification options\n\nSEND_DISCORD="YES"\nDISCORD_WEBHOOK_URL="https://discord.com/api/webhooks/XXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"\nDEFAULT_RECIPIENT_DISCORD="alerts"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/discord/metadata.yaml"},{id:"notify-dynatrace",meta:{name:"Dynatrace",link:"https://dynatrace.com",categories:["notify.agent"],icon_filename:"dynatrace.svg"},keywords:["Dynatrace"],overview:"# Dynatrace\n\nDynatrace allows you to receive notifications using their Events REST API. See the [Dynatrace documentation](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event) about POSTing an event in the Events API for more details.\nYou can send notifications to Dynatrace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- A Dynatrace Server. You can use the same on all your Netdata servers but make sure the server is network visible from your Netdata hosts. The Dynatrace server should be with protocol prefixed (http:// or https://), for example: https://monitor.example.com.\n- An API Token. Generate a secure access API token that enables access to your Dynatrace monitoring data via the REST-based API. 
See [Dynatrace API - Authentication](https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/basics/dynatrace-api-authentication/) for more details.\n- An API Space. This is the URL part of the page you have access in order to generate the API Token. For example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n- A Server Tag. To generate one on your Dynatrace Server, go to Settings --\x3e Tags --\x3e Manually applied tags and create the Tag. The Netdata alarm is sent as a Dynatrace Event to be correlated with all those hosts tagged with this Tag you have created.\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_DYNATRACE | Set `SEND_DYNATRACE` to YES | YES | yes |\n| DYNATRACE_SERVER | Set `DYNATRACE_SERVER` to the Dynatrace server with the protocol prefix, for example `https://monitor.example.com`. |  | yes |\n| DYNATRACE_TOKEN | Set `DYNATRACE_TOKEN` to your Dynatrace API authentication token |  | yes |\n| [DYNATRACE_SPACE](#option-dynatrace-space) | Set `DYNATRACE_SPACE` to the API Space, it is the URL part of the page you have access in order to generate the API Token. |  | yes |\n| DYNATRACE_TAG_VALUE | Set `DYNATRACE_TAG_VALUE` to your Dynatrace Server Tag. |  | yes |\n| DYNATRACE_ANNOTATION_TYPE | `DYNATRACE_ANNOTATION_TYPE` can be left to its default value Netdata Alarm, but you can change it to better fit your needs. | Netdata Alarm | no |\n| [DYNATRACE_EVENT](#option-dynatrace-event) | Set `DYNATRACE_EVENT` to the Dynatrace eventType you want. 
| Netdata Alarm | no |\n\n<a id="option-dynatrace-space"></a>\n##### DYNATRACE_SPACE\n\nFor example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n\n\n<a id="option-dynatrace-event"></a>\n##### DYNATRACE_EVENT\n\n`AVAILABILITY_EVENT`, `CUSTOM_ALERT`, `CUSTOM_ANNOTATION`, `CUSTOM_CONFIGURATION`, `CUSTOM_DEPLOYMENT`, `CUSTOM_INFO`, `ERROR_EVENT`,\n`MARKED_FOR_TERMINATION`, `PERFORMANCE_EVENT`, `RESOURCE_CONTENTION_EVENT`.\nYou can read more [here](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event#request-body-objects).\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Dynatrace global notification options\n\nSEND_DYNATRACE="YES"\nDYNATRACE_SERVER="https://monitor.example.com"\nDYNATRACE_TOKEN="XXXXXXX"\nDYNATRACE_SPACE="2a93fe0e-4cd5-469a-9d0d-1a064235cfce"\nDYNATRACE_TAG_VALUE="SERVERTAG"\nDYNATRACE_ANNOTATION_TYPE="Netdata Alert"\nDYNATRACE_EVENT="AVAILABILITY_EVENT"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/dynatrace/metadata.yaml"},{id:"notify-email",meta:{name:"Email",link:"",categories:["notify.agent"],icon_filename:"email.png"},keywords:["email"],overview:"# Email\n\nSend notifications via Email using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- A working sendmail command is required for email alerts to work. Almost all MTAs provide a sendmail interface. Netdata sends all emails as user netdata, so make sure your sendmail works for local users.\n- Access to the terminal where Netdata Agent is running\n- When running Netdata with Docker Compose the emails are sent with `msmtp`, and you need a basic configuration for it to work. 
\n  \n  - Add a [msmtprc](https://marlam.de/msmtp/msmtprc.txt) config file on your Docker root folder, and edit it according to your needs.\n  - Link it into your Netdata container with this:\n  \n    ```yaml\n        volumes:\n          - /path/to/netdata-docker/msmtprc:/etc/msmtprc:ro\n    ```\n  \n  - Update your container with `docker compose up -d`.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| EMAIL_SENDER | You can change `EMAIL_SENDER` to the email address sending the notifications. | netdata | no |\n| SEND_EMAIL | Set `SEND_EMAIL` to YES | YES | yes |\n| [DEFAULT_RECIPIENT_EMAIL](#option-default-recipient-email) | Set `DEFAULT_RECIPIENT_EMAIL` to the email address you want the email to be sent to by default. You can define multiple email addresses like this: `alarms@example.com` `systems@example.com`. | root | yes |\n\n<a id="option-default-recipient-email"></a>\n##### DEFAULT_RECIPIENT_EMAIL\n\nAll roles will default to this variable if left unconfigured.\nThe `DEFAULT_RECIPIENT_EMAIL` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_email[sysadmin]="systems@example.com"\nrole_recipients_email[domainadmin]="domains@example.com"\nrole_recipients_email[dba]="databases@example.com systems@example.com"\nrole_recipients_email[webmaster]="marketing@example.com development@example.com"\nrole_recipients_email[proxyadmin]="proxy-admin@example.com"\nrole_recipients_email[sitemgr]="sites@example.com"\n```\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# email global notification options\n\nEMAIL_SENDER="example@domain.com"\nSEND_EMAIL="YES"\nDEFAULT_RECIPIENT_EMAIL="recipient@example.com"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/email/metadata.yaml"},{id:"notify-flock",meta:{name:"Flock",link:"https://support.flock.com/",categories:["notify.agent"],icon_filename:"flock.png"},keywords:["Flock"],overview:"# Flock\n\nSend notifications to Flock using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, 
and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by flock.com. You can use the same on all your Netdata servers (or you can have multiple if you like). Read more about flock webhooks and how to get one [here](https://admin.flock.com/webhooks).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_FLOCK | Set `SEND_FLOCK` to YES | YES | yes |\n| FLOCK_WEBHOOK_URL | Set `FLOCK_WEBHOOK_URL` to your webhook URL. |  | yes |\n| [DEFAULT_RECIPIENT_FLOCK](#option-default-recipient-flock) | Set `DEFAULT_RECIPIENT_FLOCK` to the Flock channel you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. |  | yes |\n\n<a id="option-default-recipient-flock"></a>\n##### DEFAULT_RECIPIENT_FLOCK\n\nYou can have different channels per role, by editing `DEFAULT_RECIPIENT_FLOCK` with the channel you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_flock[sysadmin]="systems"\nrole_recipients_flock[domainadmin]="domains"\nrole_recipients_flock[dba]="databases systems"\nrole_recipients_flock[webmaster]="marketing development"\nrole_recipients_flock[proxyadmin]="proxy-admin"\nrole_recipients_flock[sitemgr]="sites"\n```\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# flock (flock.com) global notification options\n\nSEND_FLOCK="YES"\nFLOCK_WEBHOOK_URL="https://api.flock.com/hooks/sendMessage/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"\nDEFAULT_RECIPIENT_FLOCK="alarms"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/flock/metadata.yaml"},{id:"notify-gotify",meta:{name:"Gotify",link:"https://gotify.net/",categories:["notify.agent"],icon_filename:"gotify.png"},keywords:["gotify"],overview:"# Gotify\n\n[Gotify](https://gotify.net/) is a self-hosted push notification service created for sending and receiving messages in real time.\nYou can send alerts to your Gotify instance using Netdata's Agent alert notification feature, which supports 
dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- An application token. You can generate a new token in the Gotify Web UI.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_GOTIFY | Set `SEND_GOTIFY` to YES | YES | yes |\n| GOTIFY_APP_TOKEN | Set `GOTIFY_APP_TOKEN` to the app token you generated. |  | yes |\n| GOTIFY_APP_URL | Set `GOTIFY_APP_URL` to point to your Gotify instance, for example `https://push.example.domain/` |  | yes |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\nSEND_GOTIFY="YES"\nGOTIFY_APP_TOKEN="XXXXXXXXXXXXXXX"\nGOTIFY_APP_URL="https://push.example.domain/"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/gotify/metadata.yaml"},{id:"notify-ilert",meta:{name:"ilert",link:"https://www.ilert.com/",categories:["notify.agent"],icon_filename:"ilert.svg"},keywords:["ilert"],overview:"# ilert\n\nilert is an alerting and incident management tool. It helps teams reduce response times by enhancing monitoring and ticketing tools with reliable alerts, automatic escalations, on-call schedules, and features for incident response, communication, and status updates.\nSending notifications to ilert via Netdata's Agent alert notification feature includes links, images, and the resolving of corresponding alerts.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- A Netdata alert source in ilert. You can create a [Netdata alert source](https://docs.ilert.com/inbound-integrations/netdata) in [ilert](https://www.ilert.com/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_ILERT | Set `SEND_ILERT` to YES | YES | yes |\n| ILERT_ALERT_SOURCE_URL | Set `ILERT_ALERT_SOURCE_URL` to your Netdata alert source URL in ilert. 
|  | yes |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\nSEND_ILERT="YES"\nILERT_ALERT_SOURCE_URL="https://api.ilert.com/api/v1/events/netdata/{API-KEY}"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/ilert/metadata.yaml"},{id:"notify-irc",meta:{name:"IRC",link:"",categories:["notify.agent"],icon_filename:"irc.png"},keywords:["IRC"],overview:"# IRC\n\nSend notifications to IRC using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- The `nc` utility. You can set the path to it, or Netdata will search for it in your system `$PATH`.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| [nc path](#option-nc-path) | Set the path for `nc`, otherwise Netdata will search for it in your system `$PATH` |  | yes |\n| SEND_IRC | Set `SEND_IRC` to YES. | YES | yes |\n| IRC_NETWORK | Set `IRC_NETWORK` to the IRC network which your preferred channels belong to. |  | yes |\n| IRC_PORT | Set `IRC_PORT` to the IRC port to which a connection will occur. |  | no |\n| IRC_NICKNAME | Set `IRC_NICKNAME` to the IRC nickname which is required to send the notification. It must not be an already registered name, as the connection\'s MODE is defined as a guest. |  | yes |\n| IRC_REALNAME | Set `IRC_REALNAME` to the IRC realname which is required in order to make the connection. 
|  | yes |\n| [DEFAULT_RECIPIENT_IRC](#option-default-recipient-irc) | You can have different channels per role, by editing `DEFAULT_RECIPIENT_IRC` with the channel you want |  | yes |\n\n<a id="option-nc-path"></a>\n##### nc path\n\n```sh\n#------------------------------------------------------------------------------\n# external commands\n#\n# The full path of the nc command.\n# If empty, the system $PATH will be searched for it.\n# If not found, irc notifications will be silently disabled.\nnc="/usr/bin/nc"\n```\n\n\n<a id="option-default-recipient-irc"></a>\n##### DEFAULT_RECIPIENT_IRC\n\nThe `DEFAULT_RECIPIENT_IRC` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_irc[sysadmin]="#systems"\nrole_recipients_irc[domainadmin]="#domains"\nrole_recipients_irc[dba]="#databases #systems"\nrole_recipients_irc[webmaster]="#marketing #development"\nrole_recipients_irc[proxyadmin]="#proxy-admin"\nrole_recipients_irc[sitemgr]="#sites"\n```\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# irc notification options\n#\nSEND_IRC="YES"\nDEFAULT_RECIPIENT_IRC="#system-alarms"\nIRC_NETWORK="irc.freenode.net"\nIRC_NICKNAME="netdata-alarm-user"\nIRC_REALNAME="netdata-user"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/irc/metadata.yaml"},{id:"notify-kavenegar",meta:{name:"Kavenegar",link:"https://kavenegar.com/",categories:["notify.agent"],icon_filename:"kavenegar.png"},keywords:["Kavenegar"],overview:"# Kavenegar\n\n[Kavenegar](https://kavenegar.com/), a service for software developers based in Iran, provides APIs for sending and receiving SMS and making voice calls.\nYou can send notifications to Kavenegar using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- The APIKEY and Sender from http://panel.kavenegar.com/client/setting/account\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_KAVENEGAR 
| Set `SEND_KAVENEGAR` to YES | YES | yes |\n| KAVENEGAR_API_KEY | Set `KAVENEGAR_API_KEY` to your API key. |  | yes |\n| KAVENEGAR_SENDER | Set `KAVENEGAR_SENDER` to the value of your Sender. |  | yes |\n| [DEFAULT_RECIPIENT_KAVENEGAR](#option-default-recipient-kavenegar) | Set `DEFAULT_RECIPIENT_KAVENEGAR` to the SMS recipient you want the alert notifications to be sent to. You can define multiple recipients like this: 09155555555 09177777777. |  | yes |\n\n<a id="option-default-recipient-kavenegar"></a>\n##### DEFAULT_RECIPIENT_KAVENEGAR\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different SMS recipients per role, by editing `DEFAULT_RECIPIENT_KAVENEGAR` with the SMS recipients you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_kavenegar[sysadmin]="09100000000"\nrole_recipients_kavenegar[domainadmin]="09111111111"\nrole_recipients_kavenegar[dba]="0922222222"\nrole_recipients_kavenegar[webmaster]="0933333333"\nrole_recipients_kavenegar[proxyadmin]="0944444444"\nrole_recipients_kavenegar[sitemgr]="0955555555"\n```\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Kavenegar (Kavenegar.com) SMS options\n\nSEND_KAVENEGAR="YES"\nKAVENEGAR_API_KEY="XXXXXXXXXXXX"\nKAVENEGAR_SENDER="YYYYYYYY"\nDEFAULT_RECIPIENT_KAVENEGAR="0912345678"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/kavenegar/metadata.yaml"},{id:"notify-matrix",meta:{name:"Matrix",link:"https://spec.matrix.org/unstable/push-gateway-api/",categories:["notify.agent"],icon_filename:"matrix.svg"},keywords:["Matrix"],overview:"# Matrix\n\nSend notifications to Matrix network rooms using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- The URL of the homeserver (`https://homeserver:port`).\n- Credentials for connecting to the homeserver, in the form of a valid access token for your account (or for a dedicated notification account). 
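You can sanity-check a token against the standard Matrix client-server API, e.g. with a request like the following (a sketch; the homeserver URL and token are placeholders):\n\n  ```\n  curl "https://homeserver:8448/_matrix/client/r0/account/whoami?access_token=ACCESS_TOKEN"\n  ```\n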
These tokens usually don\'t expire.\n- The Room IDs that you want to send the notifications to.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_MATRIX | Set `SEND_MATRIX` to YES | YES | yes |\n| MATRIX_HOMESERVER | Set `MATRIX_HOMESERVER` to the URL of the Matrix homeserver. |  | yes |\n| [MATRIX_ACCESSTOKEN](#option-matrix-accesstoken) | Set `MATRIX_ACCESSTOKEN` to the access token from your Matrix account. |  | yes |\n| [DEFAULT_RECIPIENT_MATRIX](#option-default-recipient-matrix) | Set `DEFAULT_RECIPIENT_MATRIX` to the Rooms you want the alert notifications to be sent to. The format is `!roomid:homeservername`. |  | yes |\n\n<a id="option-matrix-accesstoken"></a>\n##### MATRIX_ACCESSTOKEN\n\nTo obtain the access token, you can use the following curl command:\n```\ncurl -XPOST -d \'{"type":"m.login.password", "user":"example", "password":"wordpass"}\' "https://homeserver:8448/_matrix/client/r0/login"\n```\n\n\n<a id="option-default-recipient-matrix"></a>\n##### DEFAULT_RECIPIENT_MATRIX\n\nThe Room IDs are unique identifiers and can be obtained from the Room settings in a Matrix client (e.g. Riot).\n\nYou can define multiple Rooms like this: `!roomid1:homeservername` `!roomid2:homeservername`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different Rooms per role, by editing `DEFAULT_RECIPIENT_MATRIX` with the `!roomid:homeservername` you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_matrix[sysadmin]="!roomid1:homeservername"\nrole_recipients_matrix[domainadmin]="!roomid2:homeservername"\nrole_recipients_matrix[dba]="!roomid3:homeservername"\nrole_recipients_matrix[webmaster]="!roomid4:homeservername"\nrole_recipients_matrix[proxyadmin]="!roomid5:homeservername"\nrole_recipients_matrix[sitemgr]="!roomid6:homeservername"\n```\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Matrix notifications\n\nSEND_MATRIX="YES"\nMATRIX_HOMESERVER="https://matrix.org:8448"\nMATRIX_ACCESSTOKEN="XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"\nDEFAULT_RECIPIENT_MATRIX="!XXXXXXXXXXXX:matrix.org"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this 
will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/matrix/metadata.yaml"},{id:"notify-messagebird",meta:{name:"MessageBird",link:"https://messagebird.com/",categories:["notify.agent"],icon_filename:"messagebird.svg"},keywords:["MessageBird"],overview:"# MessageBird\n\nSend notifications to MessageBird using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- An access key under \'API ACCESS (REST)\' (you will want a live key); you can read more [here](https://developers.messagebird.com/quickstarts/sms/test-credits-api-keys/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_MESSAGEBIRD | Set `SEND_MESSAGEBIRD` to YES | YES | yes |\n| MESSAGEBIRD_ACCESS_KEY | Set `MESSAGEBIRD_ACCESS_KEY` to your API key. |  | yes |\n| MESSAGEBIRD_NUMBER | Set `MESSAGEBIRD_NUMBER` to the MessageBird number you want to use for the alert. |  | yes |\n| [DEFAULT_RECIPIENT_MESSAGEBIRD](#option-default-recipient-messagebird) | Set `DEFAULT_RECIPIENT_MESSAGEBIRD` to the number you want the alert notifications to be sent to as SMS. You can define multiple recipients like this: +15555555555 +17777777777. |  | yes |\n\n<a id="option-default-recipient-messagebird"></a>\n##### DEFAULT_RECIPIENT_MESSAGEBIRD\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_MESSAGEBIRD` with the number you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_messagebird[sysadmin]="+15555555555"\nrole_recipients_messagebird[domainadmin]="+15555555556"\nrole_recipients_messagebird[dba]="+15555555557"\nrole_recipients_messagebird[webmaster]="+15555555558"\nrole_recipients_messagebird[proxyadmin]="+15555555559"\nrole_recipients_messagebird[sitemgr]="+15555555550"\n```\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Messagebird (messagebird.com) SMS options\n\nSEND_MESSAGEBIRD="YES"\nMESSAGEBIRD_ACCESS_KEY="XXXXXXXX"\nMESSAGEBIRD_NUMBER="XXXXXXX"\nDEFAULT_RECIPIENT_MESSAGEBIRD="+15555555555"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to 
sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/messagebird/metadata.yaml"},{id:"notify-ntfy",meta:{name:"ntfy",link:"https://ntfy.sh/",categories:["notify.agent"],icon_filename:"ntfy.svg"},keywords:["ntfy"],overview:"# ntfy\n\n[ntfy](https://ntfy.sh/) (pronounce: notify) is a simple HTTP-based [pub-sub](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) notification service. It allows you to send notifications to your phone or desktop via scripts from any computer, entirely without signup, cost or setup. It's also [open source](https://github.com/binwiederhier/ntfy) if you want to run your own server.\nYou can send alerts to an ntfy server using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- (Optional) A [self-hosted ntfy server](https://docs.ntfy.sh/faq/#can-i-self-host-it), in case you don\'t want to use https://ntfy.sh\n- A new [topic](https://ntfy.sh/#subscribe) for the notifications to be published to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_NTFY | Set `SEND_NTFY` to YES | YES | yes |\n| [DEFAULT_RECIPIENT_NTFY](#option-default-recipient-ntfy) | URL formed by the server-topic combination you want the alert notifications to be sent to. Unless hosting your own server, the server should always be set to https://ntfy.sh. |  | yes |\n| [NTFY_USERNAME](#option-ntfy-username) | The username for netdata to use to authenticate with an ntfy server. |  | no |\n| [NTFY_PASSWORD](#option-ntfy-password) | The password for netdata to use to authenticate with an ntfy server. |  | no |\n| [NTFY_ACCESS_TOKEN](#option-ntfy-access-token) | The access token for netdata to use to authenticate with an ntfy server. |  | no |\n\n<a id="option-default-recipient-ntfy"></a>\n##### DEFAULT_RECIPIENT_NTFY\n\nYou can define multiple recipient URLs like this: `https://SERVER1/TOPIC1` `https://SERVER2/TOPIC2`\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different servers and/or topics per role, by editing DEFAULT_RECIPIENT_NTFY with the server-topic combination you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_ntfy[sysadmin]="https://SERVER1/TOPIC1"\nrole_recipients_ntfy[domainadmin]="https://SERVER2/TOPIC2"\nrole_recipients_ntfy[dba]="https://SERVER3/TOPIC3"\nrole_recipients_ntfy[webmaster]="https://SERVER4/TOPIC4"\nrole_recipients_ntfy[proxyadmin]="https://SERVER5/TOPIC5"\nrole_recipients_ntfy[sitemgr]="https://SERVER6/TOPIC6"\n```\n\n\n<a id="option-ntfy-username"></a>\n##### NTFY_USERNAME\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n<a id="option-ntfy-password"></a>\n##### NTFY_PASSWORD\n\nOnly useful on self-hosted ntfy instances. 
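As a quick check that the username/password pair can publish to your topic, you can use a plain curl request (a sketch; the self-hosted server and topic are placeholders):\n\n```\ncurl -u netdata:your-password -d "test from Netdata" https://ntfy.example.com/TOPIC1\n```\n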
See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n<a id="option-ntfy-access-token"></a>\n##### NTFY_ACCESS_TOKEN\n\nThis can be used in place of `NTFY_USERNAME` and `NTFY_PASSWORD` to authenticate with a self-hosted ntfy instance. See [access tokens](https://docs.ntfy.sh/config/?h=access+to#access-tokens) for details.\nEnsure that the token user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\nSEND_NTFY="YES"\nDEFAULT_RECIPIENT_NTFY="https://ntfy.sh/netdata-X7seHg7d3Tw9zGOk https://ntfy.sh/netdata-oIPm4IK1IlUtlA30"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/ntfy/metadata.yaml"},{id:"notify-opsgenie",meta:{name:"OpsGenie",link:"https://www.atlassian.com/software/opsgenie",categories:["notify.agent"],icon_filename:"opsgenie.png"},keywords:["OpsGenie"],overview:"# OpsGenie\n\nOpsgenie is an alerting and incident response tool. It is designed to group and filter alarms, build custom routing rules for on-call teams, and correlate deployments and commits to incidents.\nYou can send notifications to Opsgenie using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- An Opsgenie integration. You can create an [integration](https://docs.opsgenie.com/docs/api-integration) in the [Opsgenie](https://www.atlassian.com/software/opsgenie) dashboard.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_OPSGENIE | Set `SEND_OPSGENIE` to YES | YES | yes |\n| OPSGENIE_API_KEY | Set `OPSGENIE_API_KEY` to your API key. |  | yes |\n| OPSGENIE_API_URL | Set `OPSGENIE_API_URL` to the corresponding URL if required, for example there are region-specific API URLs such as `https://eu.api.opsgenie.com`. 
| https://api.opsgenie.com | no |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\nSEND_OPSGENIE="YES"\nOPSGENIE_API_KEY="11111111-2222-3333-4444-555555555555"\nOPSGENIE_API_URL=""\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/opsgenie/metadata.yaml"},{id:"notify-pagerduty",meta:{name:"PagerDuty",link:"https://www.pagerduty.com/",categories:["notify.agent"],icon_filename:"pagerduty.png"},keywords:["PagerDuty"],overview:"# PagerDuty\n\nPagerDuty is an enterprise incident resolution service that integrates with ITOps and DevOps monitoring stacks to improve operational reliability and agility. From enriching and aggregating events to correlating them into incidents, PagerDuty streamlines the incident management process by reducing alert noise and resolution times.\nYou can send notifications to PagerDuty using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- An installation of the [PagerDuty](https://www.pagerduty.com/docs/guides/agent-install-guide/) Agent on the node running the Netdata Agent\n- A PagerDuty Generic API service using either the `Events API v2` or `Events API v1`\n- [Add a new service](https://support.pagerduty.com/docs/services-and-integrations#section-configuring-services-and-integrations) to PagerDuty. Click Use our API directly and select either `Events API v2` or `Events API v1`. Once you finish creating the service, click on the Integrations tab to find your Integration Key.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_PD | Set `SEND_PD` to YES | YES | yes |\n| [DEFAULT_RECIPIENT_PD](#option-default-recipient-pd) | Set `DEFAULT_RECIPIENT_PD` to the PagerDuty service key you want the alert notifications to be sent to. You can define multiple service keys like this: `pd_service_key_1` `pd_service_key_2`. 
|  | yes |\n\n<a id="option-default-recipient-pd"></a>\n##### DEFAULT_RECIPIENT_PD\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PD` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_pd[sysadmin]="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxa"\nrole_recipients_pd[domainadmin]="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxb"\nrole_recipients_pd[dba]="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxc"\nrole_recipients_pd[webmaster]="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxd"\nrole_recipients_pd[proxyadmin]="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxe"\nrole_recipients_pd[sitemgr]="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxf"\n```\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pagerduty.com notification options\n\nSEND_PD="YES"\nDEFAULT_RECIPIENT_PD="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"\nUSE_PD_VERSION="2"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/pagerduty/metadata.yaml"},{id:"notify-prowl",meta:{name:"Prowl",link:"https://www.prowlapp.com/",categories:["notify.agent"],icon_filename:"prowl.png"},keywords:["Prowl"],overview:"# Prowl\n\nSend notifications to Prowl using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- Because of how Netdata integrates with Prowl, there is a hard limit of at most 1000 notifications per hour (starting from the first notification sent). Any alerts beyond the first thousand in an hour will be dropped.\n- Warning messages will be sent with the 'High' priority, critical messages will be sent with the 'Emergency' priority, and all other messages will be sent with the normal priority. 
Opening the notification's associated URL will take you to the Netdata dashboard of the system that issued the alert, directly to the chart that it triggered on.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- A Prowl API key, which can be requested through the Prowl website after registering\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_PROWL | Set `SEND_PROWL` to YES | YES | yes |\n| [DEFAULT_RECIPIENT_PROWL](#option-default-recipient-prowl) | Set `DEFAULT_RECIPIENT_PROWL` to  the Prowl API key you want the alert notifications to be sent to. You can define multiple API keys like this: `APIKEY1`, `APIKEY2`. |  | yes |\n\n<a id="option-default-recipient-prowl"></a>\n##### DEFAULT_RECIPIENT_PROWL\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PROWL` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_prowl[sysadmin]="AAAAAAAA"\nrole_recipients_prowl[domainadmin]="BBBBBBBBB"\nrole_recipients_prowl[dba]="CCCCCCCCC"\nrole_recipients_prowl[webmaster]="DDDDDDDDDD"\nrole_recipients_prowl[proxyadmin]="EEEEEEEEEE"\nrole_recipients_prowl[sitemgr]="FFFFFFFFFF"\n```\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# iOS Push Notifications\n\nSEND_PROWL="YES"\nDEFAULT_RECIPIENT_PROWL="XXXXXXXXXX"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/prowl/metadata.yaml"},{id:"notify-pushbullet",meta:{name:"Pushbullet",link:"https://www.pushbullet.com/",categories:["notify.agent"],icon_filename:"pushbullet.png"},keywords:["Pushbullet"],overview:"# Pushbullet\n\nSend notifications to Pushbullet using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- A Pushbullet access token that can be created in your [account settings](https://www.pushbullet.com/#settings/account).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### 
Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_PUSHBULLET | Set `SEND_PUSHBULLET` to YES | YES | yes |\n| PUSHBULLET_ACCESS_TOKEN | Set `PUSHBULLET_ACCESS_TOKEN` to the access token you generated. |  | yes |\n| [DEFAULT_RECIPIENT_PUSHBULLET](#option-default-recipient-pushbullet) | Set `DEFAULT_RECIPIENT_PUSHBULLET` to the email (e.g. `example@domain.com`) or the channel tag (e.g. `#channel`) you want the alert notifications to be sent to. |  | yes |\n\n<a id="option-default-recipient-pushbullet"></a>\n##### DEFAULT_RECIPIENT_PUSHBULLET\n\nYou can define multiple entries like this: user1@email.com user2@email.com.\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHBULLET` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_pushbullet[sysadmin]="user1@email.com"\nrole_recipients_pushbullet[domainadmin]="user2@mail.com"\nrole_recipients_pushbullet[dba]="#channel1"\nrole_recipients_pushbullet[webmaster]="#channel2"\nrole_recipients_pushbullet[proxyadmin]="user3@mail.com"\nrole_recipients_pushbullet[sitemgr]="user4@mail.com"\n```\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushbullet (pushbullet.com) push notification options\n\nSEND_PUSHBULLET="YES"\nPUSHBULLET_ACCESS_TOKEN="XXXXXXXXX"\nDEFAULT_RECIPIENT_PUSHBULLET="admin1@example.com admin3@somemail.com #examplechanneltag #anotherchanneltag"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/pushbullet/metadata.yaml"},{id:"notify-pushover",meta:{name:"PushOver",link:"https://pushover.net/",categories:["notify.agent"],icon_filename:"pushover.png"},keywords:["PushOver"],overview:"# PushOver\n\nSend notifications to Pushover using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n- Netdata will send warning messages with priority 0 and critical messages with priority 1.\n- Pushover allows you to select do-not-disturb hours. 
The way this is configured, critical notifications will ring and vibrate your phone, even during the do-not-disturb hours.\n- All other notifications will be delivered silently.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- An Application token. You can use the same on all your Netdata servers.\n- A User token for each user you are going to send notifications to. This is the actual recipient of the notification.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_PUSHOVER | Set `SEND_PUSHOVER` to YES | YES | yes |\n| PUSHOVER_APP_TOKEN | Set `PUSHOVER_APP_TOKEN` to your Pushover Application token. |  | yes |\n| [DEFAULT_RECIPIENT_PUSHOVER](#option-default-recipient-pushover) | Set `DEFAULT_RECIPIENT_PUSHOVER` to the Pushover User token you want the alert notifications to be sent to. You can define multiple User tokens like this: `USERTOKEN1` `USERTOKEN2`. |  | yes |\n\n<a id="option-default-recipient-pushover"></a>\n##### DEFAULT_RECIPIENT_PUSHOVER\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHOVER` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_pushover[sysadmin]="USERTOKEN1"\nrole_recipients_pushover[domainadmin]="USERTOKEN2"\nrole_recipients_pushover[dba]="USERTOKEN3 USERTOKEN4"\nrole_recipients_pushover[webmaster]="USERTOKEN5"\nrole_recipients_pushover[proxyadmin]="USERTOKEN6"\nrole_recipients_pushover[sitemgr]="USERTOKEN7"\n```\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushover (pushover.net) global notification options\n\nSEND_PUSHOVER="YES"\nPUSHOVER_APP_TOKEN="XXXXXXXXX"\nDEFAULT_RECIPIENT_PUSHOVER="USERTOKEN"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/pushover/metadata.yaml"},{id:"notify-rocketchat",meta:{name:"RocketChat",link:"https://rocket.chat/",categories:["notify.agent"],icon_filename:"rocketchat.png"},keywords:["RocketChat"],overview:"# RocketChat\n\nSend notifications to Rocket.Chat using 
Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by RocketChat. You can use the same on all your Netdata servers (or you can have multiple if you like; your decision).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_ROCKETCHAT | Set `SEND_ROCKETCHAT` to `YES` | YES | yes |\n| ROCKETCHAT_WEBHOOK_URL | Set `ROCKETCHAT_WEBHOOK_URL` to your webhook URL. |  | yes |\n| [DEFAULT_RECIPIENT_ROCKETCHAT](#option-default-recipient-rocketchat) | Set `DEFAULT_RECIPIENT_ROCKETCHAT` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. |  | yes |\n\n<a id="option-default-recipient-rocketchat"></a>\n##### DEFAULT_RECIPIENT_ROCKETCHAT\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_ROCKETCHAT` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_rocketchat[sysadmin]="systems"\nrole_recipients_rocketchat[domainadmin]="domains"\nrole_recipients_rocketchat[dba]="databases systems"\nrole_recipients_rocketchat[webmaster]="marketing development"\nrole_recipients_rocketchat[proxyadmin]="proxy_admin"\nrole_recipients_rocketchat[sitemgr]="sites"\n```\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# rocketchat (rocket.chat) global notification options\n\nSEND_ROCKETCHAT="YES"\nROCKETCHAT_WEBHOOK_URL="<your_incoming_webhook_url>"\nDEFAULT_RECIPIENT_ROCKETCHAT="monitoring_alarms"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/rocketchat/metadata.yaml"},{id:"notify-signl4",meta:{name:"SIGNL4",link:"https://www.signl4.com/",categories:["notify.agent"],icon_filename:"signl4.svg"},keywords:["signl4"],overview:"# SIGNL4\n\nSIGNL4 offers critical alerting, incident response and service dispatching for operating critical 
infrastructure. It alerts you persistently via app push, SMS text, voice calls, and email, including tracking, escalation, on-call duty scheduling and collaboration.\nYou can send notifications to SIGNL4 using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- An inbound webhook in SIGNL4\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_SIGNL4 | Set `SEND_SIGNL4` to YES | YES | yes |\n| SIGNL4_WEBHOOK_URL | Set `SIGNL4_WEBHOOK_URL` to your SIGNL4 webhook URL. |  | yes |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\nSEND_SIGNL4="YES"\nSIGNL4_WEBHOOK_URL="https://connect.signl4.com/webhook/xxxxxxxx"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/signl4/metadata.yaml"},{id:"notify-slack",meta:{name:"Slack",link:"https://slack.com/",categories:["notify.agent"],icon_filename:"slack.png"},keywords:["Slack"],overview:"# Slack\n\nSend notifications to a Slack workspace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- A Slack app along with an incoming webhook; read Slack\'s guide on the topic [here](https://api.slack.com/messaging/webhooks).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_SLACK | Set `SEND_SLACK` to YES | YES | yes |\n| SLACK_WEBHOOK_URL | Set `SLACK_WEBHOOK_URL` to your Slack app\'s webhook URL. |  | yes |\n| DEFAULT_RECIPIENT_SLACK | Set `DEFAULT_RECIPIENT_SLACK` to the Slack channel your Slack app is set to send messages to. The syntax for channels is `#channel` or `channel`. 
|  | yes |\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# slack (slack.com) global notification options\n\nSEND_SLACK="YES"\nSLACK_WEBHOOK_URL="https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"\nDEFAULT_RECIPIENT_SLACK="#alarms"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/slack/metadata.yaml"},{id:"notify-sms",meta:{name:"SMS",link:"http://smstools3.kekekasvi.com/",categories:["notify.agent"],icon_filename:"sms.svg"},keywords:["SMS tools 3","SMS","Messaging"],overview:"# SMS\n\nSend notifications to `smstools3` using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\nThe SMS Server Tools 3 is an SMS gateway software package which can send and receive short messages through GSM modems and mobile phones.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- [Install](http://smstools3.kekekasvi.com/index.php?p=compiling) and [configure](http://smstools3.kekekasvi.com/index.php?p=configure) `smsd`\n- Ensure that the user `netdata` can execute `sendsms`. Any user executing `sendsms` needs to:\n  - Have write permissions to `/tmp` and `/var/spool/sms/outgoing`\n  - Be a member of group `smsd`\n  - To verify that the steps above are successful, switch to the `netdata` user (`su netdata`) and execute `sendsms phone message`.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| [sendsms](#option-sendsms) | Set the path for `sendsms`, otherwise Netdata will search for it in your system `$PATH`. |  | yes |\n| SEND_SMS | Set `SEND_SMS` to `YES`. | YES | yes |\n| [DEFAULT_RECIPIENT_SMS](#option-default-recipient-sms) | Set `DEFAULT_RECIPIENT_SMS` to the phone number you want the alert notifications to be sent to. You can define multiple phone numbers like this: PHONE1 PHONE2. 
|  | yes |\n\n<a id="option-sendsms"></a>\n##### sendsms\n\n```text\n# The full path of the sendsms command (smstools3).\n# If empty, the system $PATH will be searched for it.\n# If not found, SMS notifications will be silently disabled.\nsendsms="/usr/bin/sendsms"\n```\n\n\n<a id="option-default-recipient-sms"></a>\n##### DEFAULT_RECIPIENT_SMS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different phone numbers per role, by editing `DEFAULT_RECIPIENT_SMS` with the phone number you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_sms[sysadmin]="PHONE1"\nrole_recipients_sms[domainadmin]="PHONE2"\nrole_recipients_sms[dba]="PHONE3"\nrole_recipients_sms[webmaster]="PHONE4"\nrole_recipients_sms[proxyadmin]="PHONE5"\nrole_recipients_sms[sitemgr]="PHONE6"\n```\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# SMS Server Tools 3 (smstools3) global notification options\nSEND_SMS="YES"\nDEFAULT_RECIPIENT_SMS="1234567890"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/smstools3/metadata.yaml"},{id:"notify-smseagle",meta:{name:"SMSEagle",link:"https://www.smseagle.eu/",categories:["notify.agent"],icon_filename:"smseagle.svg"},keywords:["smseagle"],overview:"# SMSEagle\n\nForward notifications to an SMSEagle device to send SMS, MMS, wake-up, or text-to-speech calls.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\nBefore using the API, you\'ll need to enable API access on your SMSEagle device by following these steps:\n\n1. Navigate to the Web-GUI and select the "Users" menu.\n2. Create a new user account with "User" access level.\n3. Locate the "Access to API" option next to your newly created user.\n4. Select APIv2 and click the "Generate new token" button to create your API access token.\n5. Set up the appropriate permissions in the APIv2 Permission section.\n\nOptional: Enable the "Access to resources of all users" checkbox if you want this API key to access data across all users. 
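By default, the API key can only access data created under its credentials.\n\nIf you want to sanity-check the token before wiring it into Netdata, you can query the device API directly. A minimal sketch, assuming the device is reachable at `https://192.168.0.101`; the header name and endpoint path below are assumptions, so substitute the ones from your SMSEagle APIv2 documentation:\n\n```bash\n# Hypothetical smoke test of an SMSEagle APIv2 token.\n# Replace the endpoint path with a real one from your device\'s APIv2 docs.\ncurl -sk -H "access-token: XXXXXXX" "https://192.168.0.101/api/v2/..."\n```\n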
\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| [DEFAULT_RECIPIENT_SMSEAGLE](#option-default-recipient-smseagle) | If a role\'s recipients are not configured, a notification will be sent to this SMS recipient (empty = do not send a notification for unconfigured roles). Multiple recipients can be given like this: "PHONE1,PHONE2..." |  | yes |\n| [SMSEAGLE_API_URL](#option-smseagle-api-url) | The URL of the SMSEagle device. |  | yes |\n| [SMSEAGLE_API_ACCESSTOKEN](#option-smseagle-api-accesstoken) | An access token for the API user on the device. |  | yes |\n| [SMSEAGLE_MSG_TYPE](#option-smseagle-msg-type) | The type of message or call to send. | sms | yes |\n| [SMSEAGLE_CALL_DURATION](#option-smseagle-call-duration) | Call duration, for the ring, tts and tts_advanced types. | 10 | yes |\n| [SMSEAGLE_VOICE_ID](#option-smseagle-voice-id) | The ID of the voice model, for the tts_advanced type. | 10 | yes |\n\n<a id="option-default-recipient-smseagle"></a>\n##### DEFAULT_RECIPIENT_SMSEAGLE\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_SMSEAGLE` with the number you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_smseagle[sysadmin]="+11222333444"\nrole_recipients_smseagle[domainadmin]="+11222333445"\nrole_recipients_smseagle[dba]="+11222333446"\nrole_recipients_smseagle[webmaster]="+11222333447"\nrole_recipients_smseagle[proxyadmin]="+11222333448"\nrole_recipients_smseagle[sitemgr]="+11222333449"\n```\n\n\n<a id="option-smseagle-api-url"></a>\n##### SMSEAGLE_API_URL\n\nThe URL of the SMSEagle device, accessible from Netdata, e.g. `https://192.168.0.101`\n\n\n<a id="option-smseagle-api-accesstoken"></a>\n##### SMSEAGLE_API_ACCESSTOKEN\n\nAn access token for the user created on the SMSEagle device\n\n\n<a id="option-smseagle-msg-type"></a>\n##### SMSEAGLE_MSG_TYPE\n\nChoose a type of message/call. Available types: sms, mms, ring (wake-up call), tts (text-to-speech call), tts_advanced (multilanguage text-to-speech call). 
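For example, a multilanguage text-to-speech call needs both a call duration and a voice ID; a minimal sketch with illustrative values, combining the options documented below:\n\n```bash\n# Illustrative values only: tts_advanced needs both extra parameters\nSMSEAGLE_MSG_TYPE="tts_advanced"\nSMSEAGLE_CALL_DURATION="10"  # also required for the ring and tts types\nSMSEAGLE_VOICE_ID="1"        # required for tts_advanced only\n```\n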
Be aware that some types require additional parameters to be set.\n\n\n<a id="option-smseagle-call-duration"></a>\n##### SMSEAGLE_CALL_DURATION\n\nCall duration; required for the ring, tts, and tts_advanced types.\n\n\n<a id="option-smseagle-voice-id"></a>\n##### SMSEAGLE_VOICE_ID\n\nThe ID of the voice model; required for the tts_advanced type.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# SMSEagle options\n\nSEND_SMSEAGLE="YES"\nSMSEAGLE_API_URL="XXXXXXXX"\nSMSEAGLE_API_ACCESSTOKEN="XXXXXXX"\nSMSEAGLE_MSG_TYPE="sms"\nSMSEAGLE_CALL_DURATION="10"\nSMSEAGLE_VOICE_ID="1"\nDEFAULT_RECIPIENT_SMSEAGLE="+11222333444"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/smseagle/metadata.yaml"},{id:"notify-syslog",meta:{name:"syslog",link:"",categories:["notify.agent"],icon_filename:"syslog.png"},keywords:["syslog"],overview:"# syslog\n\nSend notifications to Syslog using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- A working `logger` command. This is the case on pretty much every Linux system in existence, and most BSD systems.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SYSLOG_FACILITY | Set `SYSLOG_FACILITY` to the facility used for logging. By default this is set to `local6`. |  | yes |\n| [DEFAULT_RECIPIENT_SYSLOG](#option-default-recipient-syslog) | Set `DEFAULT_RECIPIENT_SYSLOG` to the recipient you want the alert notifications to be sent to. |  | yes |\n| [SEND_SYSLOG](#option-send-syslog) | Set `SEND_SYSLOG` to `YES`; make sure you have everything else configured before turning this on. |  | yes |\n\n<a id="option-default-recipient-syslog"></a>\n##### DEFAULT_RECIPIENT_SYSLOG\n\nTargets are defined as follows:\n\n```text\n[[facility.level][@host[:port]]/]prefix\n```\n\n`prefix` defines what the log messages are prefixed with. 
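By default, all lines are prefixed with \'netdata\'.\n\nA few illustrative targets, read against the pattern above (the remote host and port in the last one are placeholders):\n\n```text\nnetdata                            # local6 facility, severity-dependent level, prefix \'netdata\'\ndaemon.notice/netdata              # facility daemon, level notice, local syslog\ndaemon.notice@loghost:514/netdata  # as above, but sent to remote host loghost, port 514\n```\n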
The facility and level are the standard syslog facility and level options; for more info on them, see your local logger and syslog documentation. By default, Netdata will log to the local6 facility, with a log level dependent on the type of message (crit for CRITICAL, warning for WARNING, and info for everything else).\n\nYou can configure sending directly to remote log servers by specifying a host (and optionally a port). However, this has a somewhat high overhead, so it is much preferred to use your local syslog daemon to handle the forwarding of messages to remote systems (pretty much all of them allow at least simple forwarding, and most of the really popular ones support complex queueing and routing of messages to remote log servers).\n\nYou can define multiple recipients like this: `daemon.notice@loghost:514/netdata daemon.notice@loghost2:514/netdata`.\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_SYSLOG` with the recipient you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_syslog[sysadmin]="daemon.notice@loghost1:514/netdata"\nrole_recipients_syslog[domainadmin]="daemon.notice@loghost2:514/netdata"\nrole_recipients_syslog[dba]="daemon.notice@loghost3:514/netdata"\nrole_recipients_syslog[webmaster]="daemon.notice@loghost4:514/netdata"\nrole_recipients_syslog[proxyadmin]="daemon.notice@loghost5:514/netdata"\nrole_recipients_syslog[sitemgr]="daemon.notice@loghost6:514/netdata"\n```\n\n\n<a id="option-send-syslog"></a>\n##### SEND_SYSLOG\n\nSet `SEND_SYSLOG` to `YES` only after you have everything else configured.\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# syslog notifications\n\nSEND_SYSLOG="YES"\nSYSLOG_FACILITY=\'local6\'\nDEFAULT_RECIPIENT_SYSLOG="daemon.notice@loghost6:514/netdata"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/syslog/metadata.yaml"},{id:"notify-teams",meta:{name:"Microsoft Teams",link:"https://www.microsoft.com/en-us/microsoft-teams/log-in",categories:["notify.agent"],icon_filename:"msteams.svg"},keywords:["Microsoft","Teams","MS teams"],overview:"# Microsoft Teams\n\nYou can send Netdata alerts to Microsoft Teams using Netdata's Agent alert 
notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Microsoft Teams. You can use the same on all your Netdata servers (or you can have multiple if you like).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_MSTEAMS | Set `SEND_MSTEAMS` to YES | YES | yes |\n| MSTEAMS_WEBHOOK_URL | set `MSTEAMS_WEBHOOK_URL` to the incoming webhook URL as given by Microsoft Teams. |  | yes |\n| [DEFAULT_RECIPIENT_MSTEAMS](#option-default-recipient-msteams) | Set `DEFAULT_RECIPIENT_MSTEAMS` to the encoded Microsoft Teams channel name you want the alert notifications to be sent to. |  | yes |\n\n<a id="option-default-recipient-msteams"></a>\n##### DEFAULT_RECIPIENT_MSTEAMS\n\nIn Microsoft Teams the channel name is encoded in the URI after `/IncomingWebhook/`. You can define multiple channels like this: `CHANNEL1` `CHANNEL2`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different channels per role, by editing `DEFAULT_RECIPIENT_MSTEAMS` with the channel you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_msteams[sysadmin]="CHANNEL1"\nrole_recipients_msteams[domainadmin]="CHANNEL2"\nrole_recipients_msteams[dba]="databases CHANNEL3"\nrole_recipients_msteams[webmaster]="CHANNEL4"\nrole_recipients_msteams[proxyadmin]="CHANNEL5"\nrole_recipients_msteams[sitemgr]="CHANNEL6"\n```\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Microsoft Teams (office.com) global notification options\n\nSEND_MSTEAMS="YES"\nMSTEAMS_WEBHOOK_URL="https://outlook.office.com/webhook/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/IncomingWebhook/CHANNEL/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"\nDEFAULT_RECIPIENT_MSTEAMS="XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected 
role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/msteams/metadata.yaml"},{id:"notify-telegram",meta:{name:"Telegram",link:"https://telegram.org/",categories:["notify.agent"],icon_filename:"telegram.svg"},keywords:["Telegram"],overview:"# Telegram\n\nSend notifications to Telegram using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- A bot token. To get one, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot` and follow the instructions. Invite your bot to a group where you want it to send messages.\n- The chat ID for every chat you want to send messages to. Invite [@myidbot](https://t.me/myidbot) bot to the group that will receive notifications, and write the command `/getgroupid@myidbot` to get the group chat ID. Group IDs start with a hyphen, supergroup IDs start with `-100`.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_TELEGRAM | Set `SEND_TELEGRAM` to YES | YES | yes |\n| TELEGRAM_API_URL | Set `TELEGRAM_API_URL` to the corresponding URL if you have your own Telegram Bot API server (e.g., for privacy or local hosting). Defaults to the official Telegram API. | https://api.telegram.org | no |\n| TELEGRAM_BOT_TOKEN | set `TELEGRAM_BOT_TOKEN` to your bot token. |  | yes |\n| [DEFAULT_RECIPIENT_TELEGRAM](#option-default-recipient-telegram) | Set the `DEFAULT_RECIPIENT_TELEGRAM` variable in your config file to your Telegram chat ID (find it with @myidbot). Separate multiple chat IDs with spaces. To send alerts to a specific topic within a chat, use `chatID:topicID`. 
|  | yes |\n\n<a id="option-default-recipient-telegram"></a>\n##### DEFAULT_RECIPIENT_TELEGRAM\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different chat IDs per role, by editing `DEFAULT_RECIPIENT_TELEGRAM` with the chat ID you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_telegram[sysadmin]="-49999333324"\nrole_recipients_telegram[domainadmin]="-49999333389"\nrole_recipients_telegram[dba]="-10099992222"\nrole_recipients_telegram[webmaster]="-10099992222 -49999333389"\nrole_recipients_telegram[proxyadmin]="-49999333344"\nrole_recipients_telegram[sitemgr]="-49999333876"\n```\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# telegram (telegram.org) global notification options\n\nSEND_TELEGRAM="YES"\nTELEGRAM_BOT_TOKEN="111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5"\nDEFAULT_RECIPIENT_TELEGRAM="-49999333876"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/telegram/metadata.yaml"},{id:"notify-twilio",meta:{name:"Twilio",link:"https://www.twilio.com/",categories:["notify.agent"],icon_filename:"twilio.png"},keywords:["Twilio"],overview:"# Twilio\n\nSend notifications to Twilio using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n",setup:'## Setup\n\n\n### Prerequisites\n\n#### \n\n- Get your SID and Token from https://www.twilio.com/console\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary="Config Options" %}\n\n\n| Option | Description | Default | Required |\n|:-----|:------------|:--------|:---------:|\n| SEND_TWILIO | Set `SEND_TWILIO` to `YES`. | YES | yes |\n| TWILIO_ACCOUNT_SID | Set `TWILIO_ACCOUNT_SID` to your account SID. |  | yes |\n| TWILIO_ACCOUNT_TOKEN | Set `TWILIO_ACCOUNT_TOKEN` to your account token. |  | yes |\n| TWILIO_NUMBER | Set `TWILIO_NUMBER` to your account\'s number. |  | yes |\n| [DEFAULT_RECIPIENT_TWILIO](#option-default-recipient-twilio) | Set `DEFAULT_RECIPIENT_TWILIO` to the number you want the alert notifications to be sent to. You can define multiple numbers like this: `+15555555555 +17777777777`. 
|  | yes |\n\n<a id="option-default-recipient-twilio"></a>\n##### DEFAULT_RECIPIENT_TWILIO\n\nYou can then have different recipients per role, by editing DEFAULT_RECIPIENT_TWILIO with the recipient\'s number you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_twilio[sysadmin]="+15555555555"\nrole_recipients_twilio[domainadmin]="+15555555556"\nrole_recipients_twilio[dba]="+15555555557"\nrole_recipients_twilio[webmaster]="+15555555558"\nrole_recipients_twilio[proxyadmin]="+15555555559"\nrole_recipients_twilio[sitemgr]="+15555555550"\n```\n\n\n\n{% /details %}\n\n\n#### via File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-configuration-files) script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#locate-your-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n\n##### Examples\n\n###### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Twilio (twilio.com) SMS options\n\nSEND_TWILIO="YES"\nTWILIO_ACCOUNT_SID="xxxxxxxxx"\nTWILIO_ACCOUNT_TOKEN="xxxxxxxxxx"\nTWILIO_NUMBER="xxxxxxxxxxx"\nDEFAULT_RECIPIENT_TWILIO="+15555555555"\n\n```\n',troubleshooting:'## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n',integration_type:"agent_notification",edit_link:"https://github.com/netdata/netdata/blob/master/src/health/notifications/twilio/metadata.yaml"},{id:"notify-cloud-awssns",meta:{name:"Amazon SNS",link:"https://aws.amazon.com/sns/",categories:["notify.cloud"],icon_filename:"awssns.png"},keywords:["awssns"],setup:"## Setup\n\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the space as an **Admin**\n- The Space needs to be on a paid plan\n- An AWS account with AWS SNS access\n\n### AWS SNS Configuration\n\n1. [Setting up access for Amazon SNS](https://docs.aws.amazon.com/sns/latest/dg/sns-setting-up.html)\n2. Create a topic\n    - On AWS SNS management console click on **Create topic**\n    - On the **Details** section, select the standard type and provide the topic name\n    - On the **Access policy** section, change the **Publishers** option to **Only the specified AWS accounts** and provide the Netdata AWS account **(123269920060)** that will be used to publish notifications to the topic being created\n3. Copy the **Topic ARN** in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the AWS SNS Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n    - **Notification settings**\n      - Configuration name (optional): A name for your configuration in order to easily refer to it\n      - Rooms: A list of Rooms for which you want to be notified\n      - Notifications: The notification types you want to receive\n    - **Integration configuration**\n      - Topic ARN: The topic provided on AWS SNS (with region) for where to publish your notifications.\n\n",integration_type:"cloud_notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-discord",meta:{name:"Discord",link:"https://discord.com/",categories:["notify.cloud"],icon_filename:"discord.png"},keywords:["discord","community"],setup:"## Setup\n\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n\n### Discord Server Configuration\n\n1. Go to **Server Settings** --\x3e **Integrations**\n2. **Create Webhook** or **View Webhooks** if you already have some defined\n3. Specify the **Name** and **Channel** on your new webhook\n4. Keep note of the **Webhook URL** as you will need it for the configuration of the integration on the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Discord Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n    - **Notification settings**\n      - Configuration name (optional): A name for your configuration in order to easily refer to it\n      - Rooms: A list of Rooms for which you want to be notified\n      - Notifications: The notification types you want to receive\n    - **Integration configuration**\n      - Webhook URL: The URL you copied from the previous section\n      - Channel Parameters: Select the channel type which the notifications will be sent to, if it is a Forum channel, you need to specify a thread name\n\n",integration_type:"cloud_notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-ilert",meta:{name:"ilert",link:"https://www.ilert.com/",categories:["notify.cloud"],icon_filename:"ilert.svg"},keywords:["ilert"],setup:'## Setup\n\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on ilert to add new Alert sources.\n\n### ilert Configuration\n\n1. From the navigation bar, open the Alert sources drop down and click "Alert sources"\n2. Click on the "+ Create a new alert source" button\n3. Configure an Alert source:\n    - Select "API integration" and click Next\n    - Provide a name that suits the source\'s purpose, for example "Netdata"\n    - Select Escalation policy\n    - Select Alert grouping (optional)\n4. Obtain the API Key:\n    - Once the Alert source is created, you will be provided with an API key. Copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the ilert Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n    - **Notification settings**\n      - Configuration name (optional): A name for your configuration in order to easily refer to it\n      - Rooms: A list of Rooms for which you want to be notified\n      - Notifications: The notification types you want to receive\n    - **Integration configuration**\n      - Alert Source API key: The key you copied in the ilert configuration step.\n\n',integration_type:"cloud_notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-mattermost",meta:{name:"Mattermost",link:"https://mattermost.com/",categories:["notify.cloud"],icon_filename:"mattermost.png"},keywords:["mattermost"],setup:"## Setup\n\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on Mattermost to add new integrations.\n\n### Mattermost Server Configuration\n\n1. In Mattermost, go to Product menu > Integrations > Incoming Webhook\n    - If you don't have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below.\n2. Select Add Incoming Webhook and add a name and description for the webhook.\n3. Select the channel to receive webhook payloads, then select Add to create the webhook\n4. You will end up with a webhook URL that looks like `https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx`, copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Mattermost Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n    - **Notification settings**\n      - Configuration name (optional): A name for your configuration in order to easily refer to it\n      - Rooms: A list of Rooms for which you want to be notified\n      - Notifications: The notification types you want to receive\n    - **Integration configuration**\n      - Webhook URL: URL provided on Mattermost for the channel you want to receive your notifications\n\n",integration_type:"cloud_notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-microsoftteams",meta:{name:"Microsoft Teams",link:"https://www.microsoft.com/en-us/microsoft-teams",categories:["notify.cloud"],icon_filename:"teams.svg"},keywords:["microsoft","teams"],setup:'## Setup\n\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- A [Microsoft Teams Essentials subscription](https://www.microsoft.com/en-sg/microsoft-teams/essentials) or higher. Note that this is a **paid** feature\n\n### Microsoft Teams Configuration\n\n1. Navigate to the desired Microsoft Teams channel and hover over the channel name. Click the three dots icon that appears\n2. 
Select "Workflows" from the options, then choose "Post to a channel when a webhook request is received"\n3. **Configure Workflow Details**\n    - Give your workflow a name, such as "Netdata Alerts"\n    - Select the target team and channel where you will receive notifications\n    - Click "Add workflow"\n4. Once the workflow is created, you will receive a unique Workflow Webhook URL, copy it, in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Microsoft Teams Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n    - **Notification settings**\n      - Configuration name (optional): A name for your configuration in order to easily refer to it\n      - Rooms: A list of Rooms for which you want to be notified\n      - Notifications: The notification types you want to receive\n    - **Integration configuration**\n      - Microsoft Teams Incoming Webhook URL: The Incoming Webhook URL that you copied earlier.\n\n',integration_type:"cloud_notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-mobile-app",meta:{name:"Netdata Mobile App",link:"https://netdata.cloud",categories:["notify.cloud"],icon_filename:"netdata.png"},keywords:["mobile-app","phone","personal-notifications"],setup:"## Setup\n\n\n### Prerequisites\n\n- A Netdata Cloud account\n- You need to have the Netdata Mobile App installed on your [Android](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or [iOS](https://apps.apple.com/in/app/netdata-mobile/id6474659622) phone.\n\n### Netdata Mobile App Configuration and device linking\n\nIn order to login to the Netdata Mobile App\n\n1. Download the Netdata Mobile App from [Google Play Store](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or the [iOS App Store](https://apps.apple.com/in/app/netdata-mobile/id6474659622)\n2. Open the App and Choose your Sign-in option\n  - Email Address: Enter the email address of your registered Netdata Cloud account and click on the verification link received by email on your mobile device.\n  - Sign-in with QR Code: Scan the QR code from the Netdata Cloud UI under **Profile Picture** --\x3e **Settings** --\x3e **Notifications** --\x3e **Mobile App Notifications** --\x3e **Show QR Code**\n\n### Netdata Configuration\n\nAfter linking your device, enable the toggle for **Mobile App Notifications** under the same settings panel.\n\n",integration_type:"cloud_notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-opsgenie",meta:{name:"Opsgenie",link:"https://www.atlassian.com/software/opsgenie",categories:["notify.cloud"],icon_filename:"opsgenie.png"},keywords:["opsgenie","atlassian"],setup:"## Setup\n\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on Opsgenie to add new integrations.\n\n### Opsgenie Server Configuration\n\n1. Go to the integrations tab of your team, click **Add integration**\n2. 
Pick **API** from the available integrations and copy the API Key in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Opsgenie Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n    - **Notification settings**\n      - Configuration name (optional): A name for your configuration in order to easily refer to it\n      - Rooms: A list of Rooms for which you want to be notified\n      - Notifications: The notification types you want to receive\n    - **Integration configuration**\n      - API Key: The key provided on Opsgenie for the channel you want to receive your notifications\n\n",integration_type:"cloud_notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-pagerduty",meta:{name:"PagerDuty",link:"https://www.pagerduty.com/",categories:["notify.cloud"],icon_filename:"pagerduty.png"},keywords:["pagerduty"],setup:"## Setup\n\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have a PagerDuty service to receive events using webhooks.\n\n### PagerDuty Server Configuration\n\n1. Create a service to receive events from your services directory page on PagerDuty\n2. On the third step of the service creation, select `Events API V2` Integration\n3. Once the service is created, you will be redirected to its configuration page, where you can copy the **Integration Key** and **Integration URL (Alert Events)** in order to add them to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the PagerDuty Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n    - **Notification settings**\n      - Configuration name (optional): A name for your configuration in order to easily refer to it\n      - Rooms: A list of Rooms for which you want to be notified\n      - Notifications: The notification types you want to receive\n    - **Integration configuration**\n      - Integration Key: A 32 character key provided by PagerDuty to receive events on your service.\n      - Integration URL (Alert Events): The URL provided by PagerDuty where Netdata Cloud will send notifications.\n\n",integration_type:"cloud_notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-rocketchat",meta:{name:"RocketChat",link:"https://www.rocket.chat/",categories:["notify.cloud"],icon_filename:"rocketchat.png"},keywords:["rocketchat"],setup:"## Setup\n\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on RocketChat to add new integrations.\n\n### RocketChat Server Configuration\n\nSteps to configure your RocketChat server to receive notifications from Netdata Cloud:\n\n1. In RocketChat, Navigate to Administration > Workspace > Integrations\n2. Click **+New** at the top right corner\n3. 
For more details about each parameter, check [Create a new incoming webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook)\n4. You will end up with a webhook endpoint that looks like `https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`, copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for RocketChat](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the RocketChat Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n    - **Notification settings**\n      - Configuration name (optional): A name for your configuration in order to easily refer to it\n      - Rooms: A list of Rooms for which you want to be notified\n      - Notifications: The notification types you want to receive\n    - **Integration configuration**\n      - Webhook URL: URL provided on RocketChat for the channel you want to receive your notifications\n\n",integration_type:"cloud_notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-servicenow",meta:{name:"ServiceNow",link:"https://www.servicenow.com/",categories:["notify.cloud"],icon_filename:"servicenow.png"},keywords:["servicenow"],setup:"## Setup\n\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The ServiceNow **Event Management** plugin enabled on your instance\n- A ServiceNow user with the `evt_mgmt_admin` role and a password for API access\n\n### ServiceNow Configuration\n\n1. Verify that the **Event Management** plugin is activated on your ServiceNow instance.\n2. Navigate to **System Security** --\x3e **Users** and create a dedicated integration user if one does not already exist.\n3. Assign the `evt_mgmt_admin` role to this user and set a strong password. Keep the username and password available for the Netdata configuration step.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the ServiceNow Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n    - **Notification settings**\n      - Configuration name (optional): A descriptive name to identify this notification configuration\n      - Rooms: A list of Rooms for which you want to be notified\n      - Notifications: The notification types you want to receive\n    - **Integration configuration**\n      - Instance URL: The base URL of your ServiceNow instance (for example, `https://my-instance.service-now.com/`).\n      - Username: The username of the ServiceNow integration user you created.\n      - Password: The password of the ServiceNow integration user you created.\n\n",integration_type:"cloud_notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-slack",meta:{name:"Slack",link:"https://slack.com/",categories:["notify.cloud"],icon_filename:"slack.png"},keywords:["slack"],setup:"## Setup\n\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have a Slack app on your workspace to receive the Webhooks.\n\n### Slack Server Configuration\n\n1. Create an app to receive webhook integrations. Check the [Slack documentation](https://api.slack.com/apps?new_app=1) for further details\n2. Install the app on your workspace\n3. Configure Webhook URLs for your workspace\n    - On your app go to **Incoming Webhooks** and click on **activate incoming webhooks**\n    - At the bottom of **Webhook URLs for Your Workspace** section you have **Add New Webhook to Workspace**\n    - Specify the channel where you want your notifications to be delivered\n    - Once completed, copy the Webhook URL in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Slack Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n    - **Notification settings**\n      - Configuration name (optional): A name for your configuration in order to easily refer to it\n      - Rooms: A list of Rooms for which you want to be notified\n      - Notifications: The notification types you want to receive\n    - **Integration configuration**\n      - Webhook URL: URL provided on Slack for the channel you want to receive your notifications\n\n",integration_type:"cloud_notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-splunk",meta:{name:"Splunk",link:"https://splunk.com/",categories:["notify.cloud"],icon_filename:"splunk-black.svg"},keywords:["Splunk"],setup:"## Setup\n\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The URI and token for your Splunk HTTP Event Collector. Refer to the [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) for detailed instructions on how to set it up.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. 
Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Splunk Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n    - **Notification settings**\n      - Configuration name (optional): A name for your configuration in order to easily refer to it\n      - Rooms: A list of Rooms for which you want to be notified\n      - Notifications: The notification types you want to receive\n    - **Integration configuration**\n      - HTTP Event Collector URI: The URI of your HTTP event collector in Splunk\n      - HTTP Event Collector Token: The token that Splunk provided to you when you created the HTTP Event Collector\n\n",integration_type:"cloud_notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-telegram",meta:{name:"Telegram",link:"https://telegram.org/",categories:["notify.cloud"],icon_filename:"telegram.svg"},keywords:["Telegram"],setup:"## Setup\n\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The Telegram bot token, chat ID and optionally the topic ID\n\n### Telegram Configuration\n\n- Bot token: To create one bot, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot` and follow the instructions. **Start a conversation with your bot or invite it into the group where you want it to send notifications**.\n- To get the chat ID you have two options:\n    - Contact the [@myidbot](https://t.me/myidbot) bot and send the `/getid` command to get your personal chat ID, or invite it into a group and use the `/getgroupid` command to get the group chat ID.\n    - Alternatively, you can get the chat ID directly from the bot API. Send your bot a command in the chat you want to use, then check `https://api.telegram.org/bot{YourBotToken}/getUpdates`, eg. `https://api.telegram.org/bot111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5/getUpdates`\n- To get the topic ID, the easiest way is this: Post a message to that topic, then right-click on it and select `Copy Message Link`. Paste it on a scratchpad and notice that it has the following structure `https://t.me/c/XXXXXXXXXX/YY/ZZ`. The topic ID is `YY` (integer).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Telegram Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n    - **Notification settings**\n      - Configuration name (optional): A name for your configuration in order to easily refer to it\n      - Rooms: A list of Rooms for which you want to be notified\n      - Notifications: The notification types you want to receive\n    - **Integration configuration**\n      - Bot Token: The token of your bot\n      - Chat ID: The chat id where your bot will deliver messages to\n      - Topic ID: The identifier of the chat topic to which your bot will send messages. If omitted or 0, messages will be sent to the General topic. 
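If topics are not supported, messages will be sent to the chat.\n\nBefore saving the configuration, you can double-check the bot token, chat ID and topic ID by sending yourself a test message straight from the Bot API. A minimal sketch: sendMessage is the standard Bot API method, the token and chat ID below are the illustrative values used earlier in this section, and message_thread_id is only needed when targeting a forum topic:\n\n```bash\n# Send a test message; drop message_thread_id for chats without topics.\n# Token, chat ID and topic ID are illustrative placeholders.\nBOT_TOKEN=111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5\ncurl -s -d chat_id=-49999333876 -d message_thread_id=2 -d text=Netdata-test https://api.telegram.org/bot$BOT_TOKEN/sendMessage\n```\n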
\n",integration_type:"cloud_notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-victorops",meta:{name:"Splunk VictorOps",link:"https://www.splunk.com/en_us/about-splunk/acquisitions/splunk-on-call.html",categories:["notify.cloud"],icon_filename:"victorops.svg"},keywords:["VictorOps","Splunk","On-Call"],setup:"## Setup\n\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The Destination URL for your Splunk VictorOps REST Endpoint Integration. Refer to the [VictorOps documentation](https://help.victorops.com/knowledge-base/rest-endpoint-integration-guide) for detailed instructions.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Splunk VictorOps Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n    - **Notification settings**\n      - Configuration name (optional): A name for your configuration in order to easily refer to it\n      - Rooms: A list of Rooms for which you want to be notified\n      - Notifications: The notification types you want to receive\n    - **Integration configuration**\n      - Destination URL: The URL of your REST endpoint, as provided by VictorOps.\n\n",integration_type:"cloud_notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"notify-cloud-webhook",meta:{name:"Webhook",link:"https://en.wikipedia.org/wiki/Webhook",categories:["notify.cloud"],icon_filename:"webhook.svg"},keywords:["generic webhooks","webhooks"],setup:'## Setup\n\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have an app that allows you to receive webhooks following a predefined schema.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Webhook integration\n5. A modal will be presented to you to enter the required details to enable the configuration:\n    - **Notification settings**\n      - Configuration name (optional): A name for your configuration in order to easily refer to it\n      - Rooms: A list of Rooms for which you want to be notified\n      - Notifications: The notification types you want to receive\n    - **Integration configuration**\n      - Webhook URL: The URL of the service that Netdata will send notifications to. To keep the communication secure, Netdata only accepts HTTPS URLs.\n      - Extra headers: Optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL.\n      - Authentication Mechanism: Netdata\'s webhook integration supports three different authentication mechanisms.\n        - Mutual TLS (recommended): The default authentication mechanism, used if no other method is selected.\n        - Basic: The client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**.\n        - Bearer: The client sends a request with an Authorization header that includes a **bearer token**.\n    - **Verification**\n      - Token: The token from the latest **Test notification** received on the webhook endpoint.\n        - Click on the **Test** button to receive a notification. The token will be embedded in the payload.\n
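\nFor illustration, here is roughly what an alert notification delivered to such an endpoint looks like: an HTTPS `POST` whose JSON body carries the alert fields (all values below are invented; the full field lists are documented under "Webhook service" right after this):\n\n```json\n{\n  "message": "ram_in_use is critical on web-01",\n  "alert": "ram_in_use",\n  "severity": "critical",\n  "chart": "system.ram",\n  "date": "2023-02-22T12:43:00Z",\n  "alert_url": "https://app.netdata.cloud/..."\n}\n```\n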
\n### Webhook service\n\nA webhook service allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL.\n\nIn this section, we\'ll go over the steps to set up a generic webhook service, including adding headers and implementing different types of authorization mechanisms.\n\n#### Netdata webhook integration\n\nThe Netdata webhook integration service will send alert and reachability notifications to the destination service as soon as they are detected.\n\nFor alert notifications, the content sent to the destination service contains a JSON object with the following properties:\n\n| field | type | description |\n|:------|:-----|:------------|\n| message | string | A summary message of the alert. |\n| alert | string | The alert the notification is related to. |\n| info | string | Additional info related to the alert. |\n| chart | string | The chart associated with the alert. |\n| context | string | The chart context. |\n| space | string | The space where the node that raised the alert is assigned. |\n| Rooms | object\[object(string,string)\] | Object with a list of Room names and URLs the node belongs to. |\n| family | string | Context family. |\n| class | string | Classification of the alert, e.g. `Error`. |\n| severity | string | Alert severity, can be one of `warning`, `critical` or `clear`. |\n| date | string | Date of the alert in ISO8601 format. 
|\n| duration | string | Duration the alert has been raised. |\n| additional_active_critical_alerts | integer | Number of additional critical alerts currently existing on the same node. |\n| additional_active_warning_alerts | integer | Number of additional warning alerts currently existing on the same node. |\n| alert_url | string | Netdata Cloud URL for this alert. |\n\nFor reachability notifications, the JSON object will contain the following properties:\n\n| field | type | description |\n|:------|:-----|:------------|\n| message | string | A summary message of the reachability alert. |\n| url | string | Netdata Cloud URL for the host experiencing the reachability alert. |\n| host | string | The hostname experiencing the reachability alert. |\n| severity | string | Severity for this notification. If the host is reachable, severity will be `info`; if the host is unreachable, it will be `critical`. |\n| status | object | An object with the status information. |\n| status.reachable | boolean | `true` if the host is reachable, `false` otherwise |\n| status.text | string | Can be `reachable` or `unreachable` |\n\n#### Extra headers\n\nWhen setting up a webhook service, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL.\n\nBy default, the following headers will be sent in the HTTP request:\n\n  | **Header** | **Value** |\n  |:----------:|-----------|\n  | Content-Type | application/json |\n\n#### Authentication mechanisms\n\nThe Netdata webhook integration supports three different authentication mechanisms:\n\n##### Mutual TLS authentication (recommended)\n\nIn mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients.\n\nThis is the default authentication mechanism used if no other method is selected.\n\nTo take advantage of mutual TLS, configure your server to verify Netdata\'s client certificate: the Netdata client sending the notification identifies itself with a client certificate that your server can validate.\n\nThe steps to perform this validation are as follows:\n\n- Store the Netdata CA certificate in a file on your disk. 
\n#### Extra headers\n\nWhen setting up a webhook service, you can specify a set of headers to be included in the HTTP requests sent to the webhook URL.\n\nBy default, the following headers will be sent in the HTTP request:\n\n  |  **Header**  | **Value**        |\n  |:------------:|------------------|\n  | Content-Type | application/json |\n\n#### Authentication mechanisms\n\nNetdata webhook integration supports three different authentication mechanisms:\n\n##### Mutual TLS authentication (recommended)\n\nIn mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients.\n\nThis is the default authentication mechanism used if no other method is selected.\n\nTo take advantage of mutual TLS, configure your server to verify Netdata\'s client certificate. To make this possible, the Netdata client sending the notification identifies itself with a client certificate that your server can validate.\n\nThe steps to perform this validation are as follows:\n\n- Store the Netdata CA certificate in a file on your disk. The content of this file should be:\n\n  <details>\n    <summary>Netdata CA certificate</summary>\n\n    ```text\n    -----BEGIN CERTIFICATE-----\n    MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN\n    BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH\n    Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL\n    EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx\n    MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK\n    Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0\n    ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh\n    IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++\n    ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs\n    QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL\n    qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8\n    fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he\n    s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc\n    Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72\n    jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+\n    4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY\n    Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw\n    PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU\n    R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC\n    AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e\n    Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY\n    1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ\n    VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io\n    rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP\n    qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH\n    7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts\n    ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4\n    X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH\n    FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR\n    Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y\n    nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3\n    5zrbwvQf\n    -----END CERTIFICATE-----\n    ```\n\n  </details>\n\n- Enable client certificate validation on the web server that is doing the TLS termination. Below are examples of how to perform this configuration in `NGINX` and `Apache`.\n\n  **NGINX**\n\n  ```nginx\n  server {\n      listen 443 ssl default_server;\n\n      # ... existing SSL configuration for server authentication ...\n      ssl_verify_client on;\n      ssl_client_certificate /path/to/Netdata_CA.pem;\n\n      location / {\n          if ($ssl_client_s_dn !~ "CN=app.netdata.cloud") {\n              return 403;\n          }\n          # ... existing location configuration ...\n      }\n  }\n  ```\n\n  **Apache**\n\n  ```apache\n  Listen 443\n  <VirtualHost *:443>\n      # ... existing SSL configuration for server authentication ...\n      SSLVerifyClient require\n      SSLCACertificateFile "/path/to/Netdata_CA.pem"\n  </VirtualHost>\n  <Directory /var/www/>\n      Require expr "%{SSL_CLIENT_S_DN_CN} == \'app.netdata.cloud\'"\n      # ... existing directory configuration ...\n  </Directory>\n  ```\n
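\nAfter enabling client certificate validation, you may want to sanity-check the stored CA file and reload the web server. A minimal sketch, assuming the file path `/path/to/Netdata_CA.pem` used in the examples above and a systemd-managed `NGINX`:\n\n```bash\n# Confirm the CA file parses, and inspect its subject and validity window\nopenssl x509 -in /path/to/Netdata_CA.pem -noout -subject -dates\n\n# Validate the new NGINX configuration, then reload it\nnginx -t && systemctl reload nginx\n```\n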
\n##### Basic authentication\n\nIn basic authentication, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, you can set the username and password that will be used when connecting to the destination service.\n\n##### Bearer token authentication\n\nIn bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service and are passed to the client after a successful authentication. If this method is selected, you can set the token to be used for connecting to the destination service.\n
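\nTo illustrate what your endpoint receives with these two methods, here are equivalent `curl` requests; the URL, credentials and token are hypothetical, and Netdata constructs these headers for you:\n\n```bash\n# Basic: the Authorization header carries base64(username:password)\necho -n "myuser:mypassword" | base64   # prints bXl1c2VyOm15cGFzc3dvcmQ=\ncurl -X POST -H "Content-Type: application/json" -H "Authorization: Basic bXl1c2VyOm15cGFzc3dvcmQ=" -d "{}" https://example.com/webhook\n\n# Bearer: the Authorization header carries the token itself\ncurl -X POST -H "Content-Type: application/json" -H "Authorization: Bearer my-secret-token" -d "{}" https://example.com/webhook\n```\n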
\n',integration_type:"cloud_notification",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml",troubleshooting:""},{id:"logs-opentelemetry",meta:{name:"OpenTelemetry Logs",link:"https://github.com/netdata/netdata/blob/master/src/crates/netdata-log-viewer/README.md",categories:["logs"],icon_filename:"opentelemetry.svg"},keywords:["opentelemetry","otel","otlp","logs"],overview:'# OpenTelemetry Logs\n\nThe OpenTelemetry log viewer plugin by Netdata makes viewing, exploring and analyzing OpenTelemetry logs simple and efficient.\n\nIt works with logs ingested by the `otel.plugin`, which receives OTLP-formatted log data and stores it in\nsystemd-compatible journal files. The log viewer provides the same powerful exploration interface used for\nsystemd journal logs.\n\n\n## Visualization\n\nYou can start exploring OpenTelemetry logs on the "Logs" tab of the Netdata UI.\n\n\n## Key features\n\n- Views logs ingested via the **OpenTelemetry OTLP/gRPC** protocol.\n- Allows filtering on **any log field** or **field value**, for any time-frame.\n- Allows **full text search** (`grep`) on all log fields, for any time-frame.\n- Provides a **histogram** for log entries over time, with a breakdown per field value, for any field and any time-frame.\n- Uses **fast indexed lookups** with pre-built indexes for performance.\n- Supports **faceted search** across log fields (severity, hostname, service, etc.).\n- In PLAY mode, shows new log entries immediately after they are received.\n',setup:"## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- The `otel.plugin` configured to ingest OpenTelemetry logs\n\n\n### Configuration\n\nThere is no configuration needed for this integration.\n",integration_type:"logs",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/logs/metadata.yaml"},{id:"logs-systemd-journal",meta:{name:"Systemd Journal Logs",link:"https://github.com/netdata/netdata/blob/master/src/collectors/systemd-journal.plugin/README.md",categories:["logs"],icon_filename:"netdata.png"},keywords:["systemd","journal","logs"],overview:'# Systemd Journal Logs\n\nThe `systemd` journal plugin by Netdata makes viewing, exploring and analyzing `systemd` journal logs simple and efficient.\n\nIt automatically discovers available journal sources, allows advanced filtering, offers interactive visual representations and supports exploring both the logs of individual servers and the logs of infrastructure-wide journal centralization servers.\n\nThe plugin automatically detects the available journal sources, based on the journal files available in `/var/log/journal` (persistent logs) and `/run/log/journal` (volatile logs).\n\n\n## Visualization\n\nYou can start exploring `systemd` journal logs on the "Logs" tab of the Netdata UI.\n\n\n## Key features\n\n- Works on both **individual servers** and **journal centralization servers**.\n- Supports `persistent` and `volatile` journals.\n- Supports `system`, `user`, `namespaces` and `remote` journals.\n- Allows filtering on **any journal field** or **field value**, for any time-frame.\n- Allows **full text search** (`grep`) on all journal fields, for any time-frame.\n- Provides a **histogram** for log entries over time, with a breakdown per field value, for any field and any time-frame.\n- Works directly on journal files, without any other third-party components.\n- Supports coloring log entries, the same way `journalctl` does.\n- In PLAY mode, provides the same experience as `journalctl -f`, showing new log entries immediately after they are received.\n',setup:"## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n\n\n### Configuration\n\nThere is no configuration needed for this integration.\n",integration_type:"logs",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/logs/metadata.yaml"},{id:"windows-event-logs",meta:{name:"Windows Event Logs",link:"https://github.com/netdata/netdata/blob/master/src/collectors/windows-events.plugin/README.md",categories:["logs"],icon_filename:"windows.svg"},keywords:["windows","windows events","logs"],overview:'# Windows Event Logs\n\nThe Windows Events plugin by Netdata makes viewing, exploring and analyzing Windows Events simple and\nefficient.\n\nThe plugin automatically detects all the available channels and offers a list of "Event Channels".\n\nBy default, it aggregates events from all event channels, providing a unified view of all events.\n\n\n## Visualization\n\nYou can start exploring Windows event logs on the "Logs" tab of the Netdata UI.\n\n\n## Key features\n\n- Supports **Windows Event Logs (WEL)**.\n- Supports **Event Tracing for Windows (ETW)** and **TraceLogging (TL)**, when events are routed to the Event Log.\n- Allows filtering on all System Event fields.\n- Allows **full text search** (`grep`) on all System and User fields.\n- Provides a **histogram** for log entries over time, with a breakdown per field value, for any System Event field and any\n  time-frame.\n- Supports coloring log entries based on severity.\n- In PLAY mode it "tails" all the Events, showing new log entries immediately after they are received.\n',setup:"## Setup\n\n### Prerequisites\n\n- A Netdata Cloud paid subscription\n\n\n### Configuration\n\nThere is no configuration needed for this integration.\n",integration_type:"logs",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/logs/metadata.yaml"},{id:"oidc-authentication",meta:{name:"OIDC",link:"https://netdata.cloud",categories:["auth"],icon_filename:"openid.svg"},keywords:["sso","oidc"],overview:"# OIDC\n\nIntegrate your organization's Authorization Servers with Netdata to better manage your team's access controls to Netdata Cloud.\n",setup:"## Setup\n\n\n### Prerequisites\n- An Authorization Server that supports the OIDC protocol\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n\n### Setting up the Authorization Server\nYour server should follow the [full specification for OIDC](https://openid.net/specs/openid-connect-core-1_0.html).\n
To integrate your Authorization Server with Netdata, you must create a client. Clients are applications and services that can request authentication of a user.\nThe access settings for your client are the following:\n\n| field                    | value                                                 |\n| :--                      | :--                                                   |\n| Root URL                 | `https://app.netdata.cloud/`                          |\n| Home/Initiate login URL  | `https://app.netdata.cloud/api/v2/auth/account/auth-server?id={netdata-space-id}&redirect_uri=https://app.netdata.cloud/sign-in&register_uri=https://app.netdata.cloud/sign-up/verify`  |\n| Redirect URL             | `https://app.netdata.cloud/api/v2/auth/account/auth-server/callback`  |\n\n### Netdata Configuration Steps\n1. Click on the Space settings cog (located above your profile icon).\n2. Click on the **User Management** section and access the **Authentication and Authorization** tab.\n3. On the OIDC card, click on **Configure**.\n4. Fill in the required credentials:\n    - **Issuer URL**: the Authorization Server Issuer URL, e.g. `https://my-auth-server.com/`\n    - **Client ID**: the Client ID from the created client\n    - **Client Secret**: the Client Secret from the created client\n\n### Supported features\n* SP-initiated SSO (Single Sign-On)\n* IdP-initiated SSO\n\n### SP-initiated SSO\n\nIf you start your authentication flow from the Netdata sign-in page, please check [these steps](https://github.com/netdata/netdata/blob/master/docs/netdata-cloud/authentication-and-authorization/enterprise-sso-authentication.md).\n\n\n### Reference\nhttps://openid.net/developers/how-connect-works/\n\n",integration_type:"authentication",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-authentication/metadata.yaml",troubleshooting:""},{id:"okta-authentication",meta:{name:"Okta SSO",link:"https://netdata.cloud",categories:["auth"],icon_filename:"okta.png"},keywords:["sso","okta","okta-sso"],overview:"# Okta SSO\n\nIntegrate your organization's Okta account with Netdata to better manage your team's access controls to Netdata Cloud.\n",setup:"## Setup\n\n\n### Prerequisites\n- An Okta account\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n\n### Setting up Okta\nSteps to be performed in the Okta Admin Portal:\n1. Click on the **Applications** tab and choose **Browse App Catalogue**.\n2. Find Netdata's preconfigured app for easy setup and click **Add Integration**.\n3. Give the app, which will appear in your apps dashboard, the preferred **Application label**, and click **Next** to move to the **Sign-On Options** tab.\n4. In the **Sign-On Options** tab, all the values we expect are already filled in; no additional data is required.\n5. Click **Done**. You can go back and edit any fields later if need be.\n6. Go to the **Assignments** tab and enter the People or Group assignments as per your organization\u2019s policies.\n\n### Netdata Configuration Steps\n1. Click on the Space settings cog (located above your profile icon).\n2. Click on the **User Management** section and access the **Authentication and Authorization** tab.\n3. On the Okta SSO card, click on **Configure**.\n4. Fill in the [required credentials](https://developer.okta.com/docs/guides/find-your-app-credentials/main/), which you can get from the **Okta Admin Portal**:\n
    - **Issuer URL**: you can get it from your profile icon at the top, e.g. `https://company-name.okta.com`\n    - **Client ID**: you can get it from the **General** tab of the application you configured in Okta\n    - **Client Secret**: you can get it from the **General** tab of the application you configured in Okta\n\n### Supported features\n* SP-initiated SSO (Single Sign-On)\n* IdP-initiated SSO\n\n### SP-initiated SSO\n\nIf you start your authentication flow from the Netdata sign-in page, please check [these steps](https://github.com/netdata/netdata/blob/master/docs/netdata-cloud/authentication-and-authorization/enterprise-sso-authentication.md).\n\n",integration_type:"authentication",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-authentication/metadata.yaml",troubleshooting:""},{id:"scim",meta:{name:"SCIM",link:"https://netdata.cloud",categories:["auth"],icon_filename:"scim.svg"},keywords:["scim","identity-management"],overview:"# SCIM\n\nThe System for Cross-domain Identity Management (SCIM) specification is designed to simplify the management of user identities in cloud-based applications and services.\n",setup:"## Setup\n\n\n### Prerequisites\n- A Netdata Cloud account\n- Admin access to the Space\n- The Space must be on a paid plan\n- OIDC/SSO integration must already be enabled in one of your Spaces\n\n### Supported Features\nThis integration adheres to SCIM v2 specifications. Supported features include:\n\n- User Resource Management (urn:ietf:params:scim:schemas:core:2.0:User)\n- Group Resource Management (urn:ietf:params:scim:schemas:core:2.0:Group)\n- Create users\n- Import users\n- Update user attributes\n- Deactivate users\n- Create groups\n- Nested groups: Supported\n- Patch operations: Supported\n- Filtering: Supported (max results: 200)\n- Authentication schemes: OAuth Bearer Token\n- Import groups: Not supported\n- Bulk operations: Not supported\n- Password synchronization: Not supported, as we rely on SSO/OIDC authentication\n- Push Now: Not supported\n- eTag: Not supported\n\n### Netdata Configuration Steps\n1. Click on the Space settings cog (located above your profile icon).\n2. Click on the **User Management** section and access the **Authentication and Authorization** tab.\n3. In the SCIM card, click on **Activate**.\n4. Depending on your situation:\n   - If OIDC/SSO integration is already enabled in your Space, click **Activate**.\n   - If you already have a SCIM integration in another Space and want to create a linked integration here, enter the SCIM token from the original integration and click **Activate**.\n5. If the setup is successful, you will receive two parameters:\n   - **Base URL**: Use this URL as the base URL for your SCIM client.\n   - **Token**: Use this token for Bearer Authentication with your SCIM client.\n
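\nAs a quick connectivity check before configuring a SCIM client, you can query the standard SCIM v2 `/Users` endpoint with the **Base URL** and **Token** from the step above; `$SCIM_BASE_URL` and `$SCIM_TOKEN` below are placeholders for those two values:\n\n```bash\n# List provisioned users; a successful call returns a SCIM ListResponse\ncurl -H \"Authorization: Bearer $SCIM_TOKEN\" \"$SCIM_BASE_URL/Users\"\n```\n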
\n## Client Configuration Steps\n\n### Okta\nIf you're configuring SCIM in Okta and you already have the Token from the previous section, follow these steps:\n\n1. Go to the **Applications** menu on the left-hand panel and select the **Netdata** application.\n2. In the **Netdata** application, navigate to the **Provisioning** tab.\n3. Click on **Configure API Integration** and check the box for **Enable API Integration**.\n4. Enter the Token (obtained in the *Netdata Configuration Steps* section) into the **API Token** field, then click **Test API Credentials** to ensure the connection is successful.\n5. If the test is successful, click **Save** to apply the configuration.\n\n## Setting Up Membership Rules\n\n1. Click on the Space settings cog (located above your profile icon).\n2. Navigate to the **User Management** section and select the **Groups** tab.\n3. Once your SCIM client has provisioned groups to Netdata, you'll see a **Create a new rule** button.\n4. Click this button to open the membership rule configuration panel.\n5. For each rule, configure the following three components:\n  - **SCIM Group**: Select the SCIM group that should be mapped.\n  - **Netdata Role**: Choose the role that members of this group should have in the Space.\n  - **Space Rooms**: (Optional) Select specific Rooms that these users should be members of.\n6. Click **Save** to activate the configuration.\n7. Repeat steps 4-6 to create additional rules as needed.\n\n### How Membership Rules Work\n\n- When a user in your identity provider is assigned to a SCIM group, they will automatically be added to your Netdata Space with the role and Room access defined in your rules.\n- If a user is removed from a SCIM group, their access will be adjusted according to your rules.\n- When users match multiple rules, they are granted the highest permission level from all their matching rules.\n- Changes to membership rules take effect immediately for new and existing users.\n\n**Important Considerations**\n- If you had previously manually invited users who are now being provisioned through SCIM, their existing roles and Room access will be updated to match your rules.\n- You must create at least one rule that assigns the **Admin** role to a SCIM group. If no admin role is defined in your rules, Netdata will not implement any user membership changes and will display a warning in the workspace.\n\n## Troubleshooting\n\n### Rotating the SCIM Token\nYou can rotate the token provided during SCIM integration setup if needed.\n\nSteps to rotate the token:\n1. Click on the Space settings cog (located above your profile icon).\n2. Click on the **User Management** section and access the **Authentication and Authorization** tab.\n3. In the already configured SCIM card, click **Configure**.\n4. Click **Regenerate Token**.\n5. If successful, you will receive a new token for Bearer Authentication with your SCIM client.\n\n### User Keying Between SCIM and OIDC\nOur SCIM (System for Cross-domain Identity Management) integration utilizes OIDC (OpenID Connect) to authenticate users.\nTo ensure users are correctly identified and authenticated between SCIM and OIDC, we use the following mapping:\n\n- SCIM externalID \u2194 OIDC sub\n\nThis mapping ensures that the identity of users remains consistent and secure across both systems.\n\n**Important**: Ensure that your OIDC and SCIM systems follow this mapping strictly.\nThe externalID in SCIM must correspond to the `sub` field in OIDC. Any deviation from this mapping may result\nin incorrect user identification and authentication failures.\n\n## Supported SCIM User properties\n\nOur SCIM server supports the following User attributes:\n\n- userName (required)\n- externalId (required)\n- name.formatted\n- name.familyName\n- name.givenName\n- active\n- emails (we only store the primary email)\n\n**Important Considerations**\n- Configure supported attributes only: Your SCIM client must be configured to send only the attributes listed above. Requests containing unsupported attributes will fail with a `400 Bad Request` error.\n- Okta users: No additional setup needed. 
The Netdata integration includes the correct attribute configuration automatically.\n\n## FAQ\n\n### Why aren\u2019t users automatically added to Netdata spaces when they\u2019re created through SCIM?\n\nUsers created through SCIM are not automatically assigned to spaces. You need to configure Membership Rules to control space assignments.\n\nSee Setting Up Membership Rules section above for setup instructions.\n\n### Reference\n[SCIM Specification](https://scim.org)\n\n",integration_type:"authentication",edit_link:"https://github.com/netdata/netdata/blob/master/integrations/cloud-authentication/metadata.yaml",troubleshooting:""}];var h=t(87239),m=t(6988),g=t(89942),p=t(14815);function f(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function y(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?f(Object(t),!0).forEach((function(n){(0,i.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):f(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const b=(0,l.Wj)((function(){let{hideEmptyCategories:e=!0}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const n=(0,h.Fc)(d),t=e=>n.find((n=>n.id===e)),o=(0,m.RF)(u);let i=[];if(e){const e=(0,h.rS)(o,t,n);i=n.filter((n=>{let{id:t}=n;return e.includes(t)}))}else i=n;return i})),E=(0,c.eU)((e=>(0,h.a8)(e(b)))),w=(0,c.eU)((e=>function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];return(0,m.RF)(u,e)}(e(b)))),B=(0,c.eU)(),C=(0,c.eU)((e=>{const n=e(B);if(n)return n;return(e=>{var n;let{categories:t,integrations:o}=e;const{selectedIntegrationCategory:i,selectedIntegration:a}=(0,g.PP)(),s=t.find((e=>{let{id:n}=e;return n===(i||p.yI)})),{tree:r}=(0,h.fk)({category:s,getCategoryById:e=>t.find((n=>n.id===e))}),A=null===(n=o.filter((e=>{let{id:n}=e;return n===a})))||void 0===n?void 0:n[0],c=r.filter(Boolean).map(((e,n)=>y(y({},e),{},{type:"category",level:n+1})));return A&&c.push(y(y({},A),{},{type:"integration"})),c})({categories:e(b),integrations:e(w)})}),((e,n,t)=>{n(B,t)})),M=(0,l.tx)(""),T=(0,l.tx)(null),I=(0,l.Wj)((()=>"true"===(0,g.PP)().integrationsModalOpen)),v=(0,c.eU)((e=>{const n=e(w).filter((e=>e.quickStart>=0));return(0,m.xM)(n)}));var _=t(32465);const Q=["flattenedCategory"];function D(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function x(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?D(Object(t),!0).forEach((function(n){(0,i.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):D(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const k=()=>(0,s.md)(T),R=()=>(0,s.fp)(T),S=()=>(0,s.md)(b),P=()=>(0,s.md)(E),F=()=>{const e=S(),n=(0,_.A)(),[t,i]=(0,s.fp)(C),c=(0,r.AY)(T),l=(0,A.rI)("selectedIntegration"),d=(0,A.rI)("selectedIntegrationTab"),u=(0,a.useCallback)((e=>{const{parents:t}=(0,h.fk)({category:e,getCategoryById:n}),o=[...t,e].map(((e,n)=>x(x({},e),{},{type:"category",level:n+1})));i(o),l(""),d(""),c()}),[e,i,l,c]),m=(0,a.useCallback)((function(){let e=arguments.length>0&&void 
0!==arguments[0]?arguments[0]:{},{flattenedCategory:t}=e,a=(0,o.A)(e,Q);const s=t||(a.categories||[])[0],r=s.categoryId||s.id,A=n(r),{parents:l}=(0,h.fk)({category:A,getCategoryById:n}),u=[...l,A].map(((e,n)=>x(x({},e),{},{type:"category",level:n+1})));i([...u,x(x({},a),{},{type:"integration"})]),d(""),c()}),[i,d,c]),g=(0,a.useCallback)((()=>{t.length>0&&(i(t.slice(0,t.length-1)),c())}),[t,i,c]),f=(0,a.useMemo)((()=>{const e=t[t.length-1];return"integration"===(null===e||void 0===e?void 0:e.type)?e:null}),[t]),y=(0,a.useMemo)((()=>{const e=t.filter((e=>"category"===e.type))||[];return e.length?e[e.length-1]:null}),[t]),b=(0,a.useCallback)((()=>{const n=e.find((e=>{let{id:n}=e;return n===p.yI})),t=[e.find((e=>{let{id:t}=e;return t===n.parentId})),n].filter(Boolean).map(((e,n)=>x(x({},e),{},{type:"category",level:n+1})));i(t),c()}),[e,i,c]),E=(0,a.useMemo)((()=>null===y||void 0===y?void 0:y.level),[y]);return{history:t,level:E,selectedCategory:y,selectedIntegration:f,pushCategory:u,pushIntegration:m,pop:g,reset:b,setHistory:i}},Y=()=>{const{selectedCategory:e}=F(),n=S(),t=U();return(0,h.IH)({category:e,integrations:t,categories:n})},U=()=>(0,s.md)(w),N=()=>(0,s.md)(v),j=()=>(0,s.md)(M),z=()=>(0,s.fp)(M),H=()=>(0,r.AY)(M),O=()=>{const e=H(),{reset:n}=F();return(0,a.useCallback)((()=>{e(),n()}),[e,n])},L=()=>(0,s.fp)(I)},37846(e,n,t){"use strict";t.d(n,{$c:()=>r,AG:()=>l,Av:()=>A,BT:()=>d,D6:()=>s,GO:()=>h,Zp:()=>c,gY:()=>u,oO:()=>m});var o=t(51510),i=t(42358),a=t(93234);const s=o.default.div.withConfig({displayName:"styled__LayoutGrid",componentId:"sc-1kju9s3-0"})(["display:grid;grid-template-columns:"," auto;column-gap:32px;padding:0 24px;height:calc( 100vh - ","px - ","px );@media screen and ","{grid-template-columns:"," auto;}@media screen and ","{grid-template-columns:"," auto;}"],(e=>{let{isSidebarCollapsed:n}=e;return n?"".concat(60,"px"):"35%"}),(e=>(0,a.vF)(e.flavour)),(e=>(0,a.u8)(e.flavour)),i.breakpoints.laptop,(e=>{let{isSidebarCollapsed:n}=e;return n?"".concat(60,"px"):"30%"}),i.breakpoints.desktop,(e=>{let{isSidebarCollapsed:n}=e;return n?"".concat(60,"px"):"25%"})),r=o.default.div.withConfig({displayName:"styled__StyledVirtualRow",componentId:"sc-1kju9s3-1"})(["display:grid;grid-template-columns:repeat(",",1fr);grid-auto-rows:","px;column-gap:16px;row-gap:16px;padding-bottom:16px;"],(e=>{let{itemsPerRow:n}=e;return n}),(e=>{let{rowHeight:n}=e;return n>16?n-16:n})),A=(0,o.default)(i.Flex).withConfig({displayName:"styled__CategoryWrapper",componentId:"sc-1kju9s3-2"})(["cursor:pointer;&:hover{background-color:",";}"],(0,i.getColor)("integrationMenuItemHover")),c=(0,o.default)(i.Flex).withConfig({displayName:"styled__Card",componentId:"sc-1kju9s3-3"})(["background:",";img{transition:all 200ms ease-in-out;}&:hover{img{transform:scale(1.2);}}"],(0,i.getColor)("panelBg")),l=(o.default.div.withConfig({displayName:"styled__CardOverlay",componentId:"sc-1kju9s3-4"})(["position:absolute;width:100%;top:0;bottom:0;filter:blur(70px);opacity:0.2;&& img{width:100%;height:100%;}"]),(0,o.default)(i.Icon).withConfig({displayName:"styled__CardIcon",componentId:"sc-1kju9s3-5"})(["position:absolute;top:-20px;left:-35px;transform:rotate(40deg);opacity:0.1;"])),d=(0,o.default)(i.Flex).withConfig({displayName:"styled__CardDescription",componentId:"sc-1kju9s3-6"})(["position:absolute;bottom:0;left:0;transform:translateY(100%);transition:all 200ms 
ease-in-out;&&.hover{transform:translateY(0);}"]),u=o.default.table.withConfig({displayName:"styled__ContentTable",componentId:"sc-1kju9s3-7"})(["width:100%;border:1px solid ",";margin-top:16px;tr:nth-child(even){background:",";}th{padding:8px 16px;background:",";}td{padding:6px;}"],(0,i.getColor)("borderSecondary"),(0,i.getColor)("mainBackground"),(0,i.getColor)("mainBackground")),h=(0,o.default)(i.PortalSidebar).withConfig({displayName:"styled__SidebarModal",componentId:"sc-1kju9s3-8"})(["width:calc(100vw - 54px);z-index:35;background:",";overflow:",";"],(0,i.getColor)("mainBackground"),(e=>{let{overflow:n="hidden"}=e;return n})),m=(0,o.default)(i.Icon).withConfig({displayName:"styled__CategoryArrow",componentId:"sc-1kju9s3-9"})(["opacity:",";"],(e=>{let{disabled:n}=e;return n?"0":"1"}))},87239(e,n,t){"use strict";t.d(n,{Fc:()=>h,IH:()=>y,PQ:()=>p,YK:()=>m,a8:()=>g,fk:()=>E,q1:()=>b,rS:()=>B,vF:()=>C});var o=t(64467),i=t(80045),a=(t(26910),t(98992),t(54520),t(72577),t(3949),t(81454),t(8872),t(25509),t(65223),t(60321),t(41927),t(11632),t(64377),t(66771),t(12516),t(68931),t(52514),t(35694),t(52774),t(49536),t(21926),t(94483),t(16215),t(62953),t(63950)),s=t.n(a),r=t(14815),A=t(41395),c=t(93234);const l=["children","most_popular"];function d(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function u(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?d(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):d(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const h=function(){let e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:null;return(arguments.length>0&&void 0!==arguments[0]?arguments[0]:[]).reduce(((n,t)=>{n.push(function(e){let{children:n,most_popular:t}=e,o=(0,i.A)(e,l),a=arguments.length>1&&void 0!==arguments[1]?arguments[1]:null;return u(u({},o),{},{name:(0,A.Zr)(o.name),mostPopular:t,parentId:a})}(t,e));const o=t.children||[];return n=[...n,...h(o,t.id)]}),[])},m=function(){const e=[],n=[];(arguments.length>0&&void 0!==arguments[0]?arguments[0]:[]).forEach((t=>{void 0!==t.priority&&t.priority>=0?e.push(t):n.push(t)}));const t=[],o=[];n.forEach((e=>{e.mostPopular?t.push(e):o.push(e)}));const i=(0,c.Wl)("priority"),a=(0,c.Kr)("name"),s=[...e.sort(i),...t.sort(a)];return{categories:[...s,...o.sort(a)],popular:s,rest:o}},g=function(){return((arguments.length>0&&void 0!==arguments[0]?arguments[0]:[])||[]).filter((e=>{let{parentId:n}=e;return null==n}))},p=e=>{let{category:n,categories:t}=e;return t.filter((e=>{let{parentId:t}=e;return n.id==t}))},f=e=>{let{category:n,categories:t,allSubcategories:o=[]}=e;return(p({category:n,categories:t})||[]).forEach((e=>{o.push(e),f({category:e,categories:t,allSubcategories:o})})),o},y=e=>{let{category:n={},integrations:t=[],categories:o=[]}=e;if(null==n)return[];const i=f({category:n,categories:o}),a=[n.id,...i.map((e=>{let{id:n}=e;return n}))];return t.filter((e=>{const n=e.categories.map((e=>{let{categoryId:n}=e;return n}));for(let t=0;t<n.length;t++)if(a.includes(n[t]))return!0;return!1}))},b=function(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];const t=n.find((n=>{let{id:t}=n;return t==e}));return 
t?null===t.parentId?r.aw[t.id]:b(t.parentId,n):null},E=e=>{let{category:n,getCategoryById:t=s()}=e;const o=(e=>((null===e||void 0===e?void 0:e.id)||"").split(".").reduce(((e,n)=>(e.length?e.push([e[e.length-1],n].join(".")):e.push(n),e)),[]))(n).map((e=>t(e)));return{tree:o,parents:o.length>1?o.slice(0,o.length-1):[],topLevelCategory:o[0]}},w=function(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:s(),t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:s(),o=arguments.length>3?arguments[3]:void 0;if(!e)return;const i=t(e);if(!i)return;n(i);const a=o.find((e=>{let{id:n}=e;return n==(null===i||void 0===i?void 0:i.parentId)}));a&&w(a.id,n,t,o)},B=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:s(),t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[];const o=new Set,i=e=>o.add(e.id);return e.forEach((e=>{e.categories.forEach((e=>{let{categoryId:o}=e;w(o,i,n,t)}))})),Array.from(o)},C=e=>{let{height:n,topLevelCategories:t=[],categories:o=[],setState:i=s()}=e;const a={};if(!n||!t.length||!o.length)return a;const A=Math.floor((n/t.length-r.V0)/r.V0);t.forEach((e=>{const n=p({category:e,categories:o});a[e.id]={subcategories:n.length,limit:n.length>A?A:0}})),Object.entries(a).forEach((e=>{let[n,{subcategories:t,limit:o}]=e;n!=r.qC&&o<A&&(a[r.qC]=u(u({},a[r.qC]),{},{limit:a[r.qC].limit+(A-t)}))})),i(a)}},93234(e,n,t){"use strict";t.d(n,{Kr:()=>i,Wl:()=>a,jU:()=>A,u8:()=>r,vF:()=>s});t(98992),t(81454);var o=t(14815);const i=e=>(n,t)=>n[e]<t[e]?-1:n[e]>t[e]?1:0,a=e=>(n,t)=>n[e]-t[e],s=function(){var e;let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:o.Jr;return(null===(e=o.jI[n])||void 0===e||null===(e=e.header)||void 0===e?void 0:e.height)||0},r=function(){var e;let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:o.Jr;return(null===(e=o.jI[n])||void 0===e||null===(e=e.cncf)||void 0===e?void 0:e.height)||0},A=e=>{var n;return"string"===typeof e?e:Array.isArray(e)?e.map(A).join(""):null!==e&&void 0!==e&&null!==(n=e.props)&&void 0!==n&&n.children?A(e.props.children):""}},6988(e,n,t){"use strict";t.d(n,{Cj:()=>f,Fd:()=>y,M6:()=>b,RF:()=>g,xM:()=>p});var o=t(64467),i=t(80045),a=(t(26910),t(98992),t(54520),t(72577),t(3949),t(81454),t(8872),t(62953),t(63950)),s=t.n(a),r=t(15505),A=t(93234);const c=["meta"];function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function d(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?l(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):l(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const u=function(){let{installDescription:e,additionalInfo:n,platformInfo:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return[e,"{% command methods=$methods isNightly=$isNightly claimToken=$claimToken claimUrl=$claimUrl claimRooms=$claimRooms /%}",n,t].join("\n\n")},h={"go.d.plugin":"go.d:collector"},m=e=>{if(!e)return null;const{module_name:n,plugin_name:t}=e,o=h[t];return o&&n?"".concat(o,":").concat(n):null},g=function(){let e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];return(arguments.length>0&&void 0!==arguments[0]?arguments[0]:[]).map((n=>function(){let 
e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},{meta:n={}}=e,t=(0,i.A)(e,c),o=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];const{monitored_instance:a,keywords:s=[],most_popular:A,community:l}=n,{name:h,categories:g=[],icon_filename:p}=a||n,f="deploy"===t.integration_type;return(0,r.bn)(d(d({name:h,categories:g.map((e=>{const n=o.find((n=>n.id===e));return{categoryId:e,name:null===n||void 0===n?void 0:n.name}})),icon:"https://www.netdata.cloud/img/"+p,keywords:s,mostPopular:A,community:l,dyncfgId:m(n)},t),f?{deployContent:u({installDescription:t.install_description,methods:t.methods,additionalInfo:t.additional_info,platformInfo:t.platform_info})}:{}))}(n,e)))},p=function(){const e=[],n=[];(arguments.length>0&&void 0!==arguments[0]?arguments[0]:[]).forEach((t=>{void 0!==t.quickStart&&t.quickStart>=0?e.push(t):n.push(t)}));const t=[],o=[];n.forEach((e=>{e.mostPopular?t.push(e):o.push(e)}));const i=[],a=[];o.forEach((e=>{e.community?a.push(e):i.push(e)}));const s=(0,A.Wl)("quickStart"),r=(0,A.Kr)("name");return[...e.sort(s),...t.sort(r),...i.sort(r),...a.sort(r)]},f=e=>{let{searchTerm:n="",integrations:t=[]}=e;if(!n)return t;const o=n.toLocaleLowerCase();return t.filter((e=>{const{name:n,keywords:t,categories:i=[]}=e;if(n.toLocaleLowerCase().includes(o))return!0;const a=i.map((e=>{let{name:n}=e;return n})).filter(Boolean);return[...t,...a].join(",").toLocaleLowerCase().includes(o)}))},y=function(){let e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"id";const n=[];return(arguments.length>0&&void 0!==arguments[0]?arguments[0]:[]).forEach((t=>{n.find((n=>n[e]===t[e]))||n.push(t)})),n},b=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:s();return e.reduce(((e,t)=>(t.categories.forEach((o=>{e.push(((e,n,t)=>d(d({},e),{},{flattenedKey:"".concat(e.id,"-").concat(n.categoryId),flattenedCategory:t(n.categoryId)}))(t,o,n))})),e)),[])}},69306(e,n,t){"use strict";t.d(n,{QZ:()=>A,aS:()=>c,bX:()=>s,g3:()=>a,ik:()=>r,ly:()=>d,xR:()=>l});var o=t(91130),i=t(15505);const a=e=>{let{spaceId:n,roomId:t,payload:i={}}=e;return o.A.post("/api/v1/spaces/".concat(n,"/rooms/").concat(t,"/insights/conversations"),i)},s=e=>{let{spaceId:n,roomId:t}=e;return o.A.get("/api/v1/spaces/".concat(n,"/rooms/").concat(t,"/insights/conversations"),{transform:i.bn})},r=e=>{let{spaceId:n,roomId:t,id:a}=e;return o.A.get("/api/v1/spaces/".concat(n,"/rooms/").concat(t,"/insights/conversations/").concat(a),{transform:i.bn})},A=e=>{let{spaceId:n,roomId:t,id:i,payload:a}=e;return o.A.post("/api/v1/spaces/".concat(n,"/rooms/").concat(t,"/insights/conversations/").concat(i,"/completion"),a,{streamResponse:!0})},c=e=>{let{spaceId:n,roomId:t,id:i,payload:a}=e;return o.A.post("/api/v1/spaces/".concat(n,"/rooms/").concat(t,"/insights/conversations/").concat(i,"/title"),a)},l=e=>{let{spaceId:n,roomId:t,id:i,payload:a}=e;return o.A.patch("/api/v1/spaces/".concat(n,"/rooms/").concat(t,"/insights/conversations/").concat(i),a)},d=e=>{let{spaceId:n,roomId:t,id:i}=e;return o.A.delete("/api/v1/spaces/".concat(n,"/rooms/").concat(t,"/insights/conversations/").concat(i))}},44790(e,n,t){"use strict";t.d(n,{A:()=>c});var o=t(64467),i=(t(84864),t(27495),t(98992),t(54520),t(3949),t(62953),t(96540)),a=t(22426);function s(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function 
r(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?s(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):s(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const A={searchTerm:""},c=()=>{const[{searchTerm:e},n]=(0,i.useState)(A),{loaded:t,value:o,error:s,reset:c}=(0,a.E)(),l=(0,i.useMemo)((()=>e?(o||[]).filter((n=>{let{id:t,title:o}=n;const i=new RegExp(e,"gi");return i.test(t)||i.test(o)})):o),[o,e]),d=(0,i.useCallback)((e=>{n((n=>r(r({},n),{},{searchTerm:e})))}),[n]);return{loaded:t,conversations:l,searchTerm:e,onSearchChange:d,error:s,reset:c}}},25147(e,n,t){"use strict";t.d(n,{C:()=>o,s:()=>i});const o=5,i="chat-ai-bottom-anchor"},55630(e,n,t){"use strict";t.d(n,{A:()=>l});t(98992),t(54520),t(3949);var o=t(64467),i=t(96540),a=(t(62953),t(52367)),s=t(61704);const r=()=>{const{accessorEnabled:e,open:n}=(0,a.A)(),{activeConversation:t,associateConversation:o}=(0,s.A)();return(0,i.useCallback)((async i=>{e&&i&&(n(),o({payload:{subjects:[...(null===t||void 0===t?void 0:t.subjects)||[],i]}}))}),[t,e,n])};function A(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function c(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?A(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):A(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const l=e=>{let{reportId:n}=e;const t=r();return(0,i.useCallback)((async function(){let{selection:e}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};t({type:"report",data:c({id:n},e?{selection:e}:{})})}),[n,t])}},61704(e,n,t){"use strict";t.d(n,{A:()=>E});var o=t(64467),i=(t(33110),t(98992),t(54520),t(3949),t(62953),t(96540)),a=t(24609),s=t(19186),r=t(69306),A=t(22426),c=t(75946),l=t(44790),d=t(98783);t(42762),t(81454);function u(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function h(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?u(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):u(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const m=e=>{let{chunk:n,set:t}=e;switch(n.type){case"message_start":(e=>{let{set:n,data:t}=e;if(null===t||void 0===t||!t.id)return null;n((e=>h(h({},e),{},{messages:[...(e||{}).messages||[],t]})))})({set:t,data:n.message});break;case"content_block_start":(e=>{let{set:n,index:t,data:o}=e;if(null===o||void 0===o||!o.type)return null;n((e=>{const n=(e||{}).messages||[];return h(h({},e),{},{messages:n.map(((e,i)=>i===n.length-1?h(h({},e),{},{content:[...e.content||[],h(h({},o),{},{index:t||0})]}):e))})}))})({set:t,index:n.index,data:n.content_block});break;case"content_block_delta":(e=>{let{set:n,index:t,data:o}=e;if(null===o||void 0===o||!o.type)return null;n((e=>{const n=(e||{}).messages||[],i=n.length-1;return 
h(h({},e),{},{messages:n.map(((e,n)=>{if(n===i){const n=e.content||[];return h(h({},e),{},{content:n.map(((e,i)=>{if(t&&t!==i)return e;if(!t&&i<n.length-1)return e;if("text_delta"===o.type&&o.text)return h(h({},e),{},{text:(e.text||"")+o.text});if("input_json_delta"===o.type&&o.partial_json){const n="string"===typeof e.input?e.input:"";return h(h({},e),{},{input:n+o.partial_json})}return e}))})}return e}))})}))})({set:t,index:n.index,data:n.delta});break;case"content_block_stop":(e=>{let{set:n,index:t}=e;n((e=>{const n=(e||{}).messages||[],o=n.length-1;return h(h({},e),{},{messages:n.map(((e,n)=>{if(n===o){const n=e.content||[];return h(h({},e),{},{content:n.map(((e,o)=>t&&t!==o||!t&&o<n.length-1?e:h(h({},e),{},{fullyParsed:!0})))})}return e}))})}))})({set:t,index:n.index});break;case"message_delta":(e=>{let{set:n,data:t}=e;const{delta:o,usage:i}=t||{};n((e=>{const n=(e||{}).messages||[],t=n.length-1;return h(h({},e),{},{messages:n.map(((e,n)=>n===t?h(h(h({},e),(null===o||void 0===o?void 0:o.stop_reason)&&{stop_reason:o.stop_reason}),i&&{usage:i}):e))})}))})({set:t,data:n});break;case"message_stop":(e=>{let{set:n}=e;n((e=>{const n=(e||{}).messages||[],t=n.length-1;return h(h({},e),{},{messages:n.map(((e,n)=>n===t?h(h({},e),{},{fullyParsed:!0}):e))})}))})({set:t})}};var g=t(1011),p=t(3319);function f(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function y(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?f(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):f(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const b={startConversation:null,createTitle:null,sendMessage:null,streamReader:null},E=()=>{const e=(0,a.vt)(),n=(0,s.ID)(),t=(0,d.A)(),{sendLog:o}=(0,p.A)(),{activeConversation:u,setActiveConversation:h,reset:f}=(0,A.i)(),{reset:E}=(0,l.A)(),w=(0,c.A)({showLoading:!0}),B=(0,c.A)({props:["metadata"]}),C=(0,i.useCallback)((e=>h((n=>y(y({},n),{},{isThinking:!0,thinkingLabel:e})))),[h]),M=(0,i.useCallback)((e=>h((n=>y(y({},n),{},{thinkingLabel:e})))),[h]),T=(0,i.useCallback)((()=>h((e=>y(y({},e),{},{isThinking:!1,thinkingLabel:""})))),[h]),I=(0,i.useCallback)((()=>h((e=>y(y({},e),{},{isParsing:!0})))),[h]),v=(0,i.useCallback)((()=>h((e=>y(y({},e),{},{isParsing:!1})))),[h]),_=(0,i.useCallback)((e=>{h((n=>y(y({},n),{},{reportMode:e})))}),[h]),Q=(0,i.useCallback)(((e,n)=>{const t=e.errorMessage||e.message||"Something went wrong";h((e=>y(y({},e),{},{isThinking:!1,error:t,messages:[...e.messages||[],{id:"error",role:"assistant",content:[{type:"error",error:t}]}]}))),o({element:"useChat",isFailure:!0,sessionId:n,description:"Error occurred during chat operation"},!0)}),[h,o]),D=(0,i.useCallback)((e=>{h((n=>y(y({},n),{},{associatedItemSelector:e})))}),[h]),x=(0,i.useCallback)((e=>{h((n=>n.associatedItemSelector?y(y({},n),{},{associatedItemSelector:y(y({},n.associatedItemSelector),{},{selectedItems:e})}):n))}),[h]),k=(0,i.useCallback)((async i=>{const a=(0,g.A)();o({element:"useChat",isStart:!0,sessionId:a,description:"Start sending message",messagePreview:i.substring(0,200)},!0),C();let s=null===u||void 0===u?void 0:u.id;if(null===u||void 0===u||!u.id){await h((e=>y(y({},e),{},{title:i.substring(0,20),loaded:!0}))),M("Starting 
conversation");const t=(0,r.g3)({spaceId:e,roomId:n});b.startConversation=t;const A=await t.catch((e=>Q(e,a))),{data:c}=A||{};if(null!==c&&void 0!==c&&c.id){s=c.id,M("Creating conversation title");const t=(0,r.aS)({spaceId:e,roomId:n,id:s,payload:{message_content:i}});b.createTitle=t;const A=await t.catch((e=>Q(e,a))),l=null===A||void 0===A?void 0:A.data;await h((e=>y(y(y({},e),c),{},{title:null===l||void 0===l?void 0:l.title}))),E(),o({element:"useChat",isSuccess:!0,sessionId:a,description:"Created new conversation",title:null===l||void 0===l?void 0:l.title,conversationId:s},!0)}}if(s){var A;const c=(null===u||void 0===u?void 0:u.messages)||[],l=null===(A=c[c.length-1])||void 0===A?void 0:A.id;C("Processing your message");const d=(0,r.QZ)({spaceId:e,roomId:n,id:s,payload:{message:i,parent_message_id:l,tools:[{name:"blocks",version:0},...null!==u&&void 0!==u&&u.reportMode?[{name:"reports",version:0}]:[]],context:t}});b.sendMessage=d;const g=await d.catch((e=>Q(e,a)));if(o({element:"useChat",isSuccess:!0,sessionId:a,description:"Got response for sent message",conversationId:s},!0),T(),null!==g&&void 0!==g&&g.data){I();const e=await(async(e,n)=>{const t=e.pipeThrough(new TextDecoderStream).getReader();let o="";for(;;){const{value:e,done:a}=await t.read();if(a)break;o+=e;const s=o.split("\n");o=s.pop()||"";for(const t of s)if(t.startsWith("data: ")){const e=t.slice(6).trim();if(e)try{n(JSON.parse(e))}catch(i){console.error("Failed to parse SSE chunk:",i,e)}}}return t})(g.data,(e=>m({chunk:e,set:h})));b.streamReader=e,v()}b.startConversation=null,b.createTitle=null,b.sendMessage=null,b.streamReader=null}else T();B(s)}),[e,n,u,E,t,B,h,C,M,T,I,v,Q,o]),R=(0,i.useCallback)((async function(){let{payload:t={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const i=(0,g.A)();o({element:"useChat",isStart:!0,sessionId:i,description:"Start associating conversation",payload:JSON.stringify(t)},!0),C();let a=null===u||void 0===u?void 0:u.id;if(null!==u&&void 0!==u&&u.id)M("Adding conversation subject"),await(0,r.xR)({spaceId:e,roomId:n,id:u.id,payload:t}),w(u.id);else{await h((e=>y(y({},e),{},{title:"Associated conversation",loaded:!0}))),M("Starting conversation");const s=(0,r.g3)({spaceId:e,roomId:n,payload:t});b.startConversation=s;const A=await s.catch((e=>Q(e,i))),{data:c}=A||{};if(null!==c&&void 0!==c&&c.id){a=c.id,M("Creating conversation title");const s=(0,r.aS)({spaceId:e,roomId:n,id:a,payload:{message_content:JSON.stringify(t)}});b.createTitle=s;const A=await s.catch((e=>Q(e,i))),l=null===A||void 0===A?void 0:A.data;await h((e=>y(y(y({},e),c),{},{title:null===l||void 0===l?void 0:l.title}))),E(),o({element:"useChat",isSuccess:!0,sessionId:i,description:"Created new conversation",title:null===l||void 0===l?void 0:l.title,conversationId:a},!0)}}T(),B(a)}),[e,n,u,E,w,B,h,C,M,T,Q,o]),S=(0,i.useCallback)((()=>{var e,n,t,o,i,a,s,r;null===(e=b.startConversation)||void 0===e||null===(n=e.cancel)||void 0===n||n.call(e),b.startConversation=null,null===(t=b.createTitle)||void 0===t||null===(o=t.cancel)||void 0===o||o.call(t),b.createTitle=null,null===(i=b.sendMessage)||void 0===i||null===(a=i.cancel)||void 0===a||a.call(i),b.sendMessage=null,null===(s=b.streamReader)||void 0===s||null===(r=s.cancel)||void 0===r||r.call(s),b.streamReader=null,h((e=>y(y({},e),{},{messages:[...e.messages||[],{id:"error",role:"assistant",content:[{type:"error",error:"Cancelled"}]}],isThinking:!1,isParsing:!1})))}),[h]);return(0,i.useEffect)((()=>()=>{var e,n,t,o,i,a,s,r;null===(e=b.startConversation)||void 
0===e||null===(n=e.cancel)||void 0===n||n.call(e),null===(t=b.createTitle)||void 0===t||null===(o=t.cancel)||void 0===o||o.call(t),null===(i=b.sendMessage)||void 0===i||null===(a=i.cancel)||void 0===a||a.call(i),null===(s=b.streamReader)||void 0===s||null===(r=s.cancel)||void 0===r||r.call(s)}),[]),{activeConversation:u,sendMessage:k,associateConversation:R,setReportMode:_,setAssociatedItemSelector:D,setAssociatedItemSelectorItems:x,resetActiveConversation:f,stopProcessing:S}}},52367(e,n,t){"use strict";t.d(n,{A:()=>ho});t(98992),t(54520),t(3949);var o=t(64467),i=t(96540),a=t(43119),s=t(31141),r=t(80045),A=(t(62953),t(51510)),c=t(42358),l=t(43375),d=t(24609),u=(t(81454),t(17702)),h=t(61704);const m="new-chat",g="conversations",p="artifacts",f=[{id:m,label:"New Chat"},{id:g,label:"Chats"}],y=m;var b=t(74848);function E(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function w(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?E(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):E(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const B=(0,A.default)(c.Flex).attrs((e=>w({height:5,cursor:"move",alignItems:"center",justifyContent:"center"},e))).withConfig({displayName:"header__MoveBar",componentId:"sc-1n1n09d-0"})(["> div{width:40px;height:2px;background-color:",";}&:hover{background-color:",";}"],(0,c.getColor)("secondaryHighlightAI"),(0,c.getRgbColor)("secondaryHighlightAI",.2)),C=e=>{let{listeners:n,attributes:t,activeTab:o,expanded:a,onExpandClick:s,onTabChange:r,onClose:A}=e;const{activeConversation:l,resetActiveConversation:d}=(0,h.A)(),{isThinking:g}=l||{},p=(0,i.useCallback)((e=>{e===m&&d(),r(e)}),[r,d]);return(0,b.jsxs)(c.Flex,{column:!0,children:[(0,b.jsx)(B,w(w(w({},n),t),{},{children:(0,b.jsx)("div",{})})),(0,b.jsxs)(c.Flex,{"data-testid":"chat-ai-modal-header",alignItems:"center",justifyContent:"between",children:[(0,b.jsx)(c.Flex,{alignItems:"center",gap:2,children:f.map((e=>{let{id:n,label:t}=e;const i=n===o;return(0,b.jsx)(u.A,w({"data-testid":"chat-ai-tab-".concat(n),label:t,textTransform:"none",icon:"",onClick:()=>p(n),disabled:g},i?{}:{flavour:"hollow"}),t)}))}),(0,b.jsxs)(c.Flex,{alignItems:"center",children:[(0,b.jsx)(c.IconButton,{icon:a?"reduceSize":"fullScreen",neutral:!0,onClick:s}),(0,b.jsx)(c.IconButton,{icon:"x",neutral:!0,onClick:A})]})]})]})};var M=t(44790),T=t(19186),I=t(69306),v=t(63872),_=t(3319);const Q=e=>{let{id:n,onSuccess:t}=e;const o=(0,d.vt)(),a=(0,T.ID)(),[s,r]=(0,v.A)(),{sendLog:A}=(0,_.A)();return(0,i.useCallback)((()=>{A({element:"useDeleteConversation",isStart:!0,description:"Deleting conversation",conversationId:n},!0),(0,I.ly)({spaceId:o,roomId:a,id:n}).then((()=>{"function"===typeof t&&t(),s({header:"Success",text:"You have successfully deleted a conversation item"}),A({element:"useDeleteConversation",isSusccess:!0,description:"Conversation deleted successfully",conversationId:n},!0)})).catch((e=>{r(e),A({element:"useDeleteConversation",isFailure:!0,description:"Error deleting conversation",conversationId:n},!0)}))}),[o,a,n,s,r,t,A])};var D=t(6304);const x=["id","title","onConfirm","onDecline"];function k(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var 
o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function R(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?k(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):k(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const S=e=>{let{id:n,title:t,onConfirm:o,onDecline:i}=e,a=(0,r.A)(e,x);const s=t||n;return(0,b.jsx)(c.ConfirmationDialog,R({className:"conversation-item-delete-modal",title:"Delete item?",handleConfirm:o,handleDecline:i,confirmLabel:"Yes, delete",declineLabel:"No",backdropContainerProps:{zIndex:"120",className:"conversation-item-delete-modal"},message:(0,b.jsxs)(c.Flex,{column:!0,gap:2,children:[(0,b.jsxs)(c.TextBig,{children:["You are going to delete ",(0,b.jsx)(c.TextBig,{strong:!0,children:s})," conversation, and this action cannot be reverted."]}),(0,b.jsx)(c.TextBig,{children:"Are you sure that you want to delete this item?"})]})},a))},P=e=>{let{id:n,title:t,disabled:o,Accessor:a,onSuccess:s,confirmationDialogProps:r={}}=e;const{reset:A}=(0,M.A)(),[l,,d,u]=(0,D.A)(),h=(0,i.useCallback)((()=>{o||(u(),A(),"function"===typeof s&&s())}),[o,u,A,s]),m=Q({id:n,onSuccess:h});return(0,b.jsxs)(b.Fragment,{children:[a?(0,b.jsx)(a,{className:"btn-conversation-item-delete",onClick:d,disabled:o}):(0,b.jsx)(c.IconButton,{className:"btn-conversation-item-delete",icon:"trashcan",disabled:o,onClick:d}),l&&(0,b.jsx)(S,R({id:n,title:t,onConfirm:m,onDecline:u},r))]})};var F=t(63950),Y=t.n(F),U=t(74891);function N(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function j(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?N(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):N(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const z=(0,U.A)(c.Flex),H=(0,U.A)(c.Icon),O={report:"view_list",default:"view_list"},L=()=>(0,b.jsxs)(c.Flex,{column:!0,gap:2,children:[(0,b.jsx)(c.TextBig,{children:"You are about to delete this subject from the conversation."}),(0,b.jsx)(c.TextBig,{children:"Are you sure you want to continue?"})]}),G=e=>{let{type:n,label:t="",tooltip:o,tooltipProps:a={},onDeleteConfirmation:s=Y()}=e;const[r,,A,l]=(0,D.A)(),d=(0,i.useCallback)((()=>{"function"===typeof s&&s(),l()}),[s,l]);return(0,b.jsxs)(b.Fragment,{children:[(0,b.jsxs)(z,{alignItems:"center",gap:3,padding:[.5,2],margin:[1,0,0,0],border:{side:"all",color:"primaryAI"},round:!0,tooltip:o,tooltipProps:j({align:"bottom"},a),children:[(0,b.jsxs)(c.Flex,{alignItems:"center",gap:2,children:[(0,b.jsx)(c.Icon,{name:O[n]||O.default,color:"primaryAI",width:"14px",height:"14px"}),(0,b.jsx)(c.Text,{color:"primaryAI",whiteSpace:"nowrap",children:t})]}),(0,b.jsx)(H,{name:"x",color:"primaryAI",cursor:"pointer",width:"14px",height:"14px",onClick:A,tooltip:"Remove subject",tooltipProps:{align:"bottom"},noWrapper:!0})]}),r&&(0,b.jsx)(c.ConfirmationDialog,{confirmLabel:"Yes, delete",handleConfirm:d,handleDecline:l,message:(0,b.jsx)(L,{}),title:"Delete Subject",backdropContainerProps:{zIndex:"120"}})]})};var 
J=t(93912),q=t(81685);const K=e=>{let{id:n,onSuccess:t=Y(),onFail:o=Y()}=e;const a=(0,d.vt)(),s=(0,T.ID)();return(0,i.useCallback)((e=>{(0,I.xR)({spaceId:a,roomId:s,id:n,payload:e}).then((e=>{let{data:n}=e;"function"===typeof t&&t(n)})).catch(o)}),[a,s,n,t,o])};var V=t(75946);const X=A.default.blockquote.withConfig({displayName:"reportSubject__Blockquote",componentId:"sc-1ltbdu7-0"})(["font-style:italic;font-size:12px;"]),W=e=>{let{label:n,children:t}=e;return(0,b.jsxs)(c.Flex,{column:!0,gap:1,children:[(0,b.jsx)(c.Text,{strong:!0,children:n}),(0,b.jsx)(c.Flex,{padding:[0,0,0,2],children:t})]})},Z=e=>{var n,t;let{report:o,subject:i}=e;if(!o&&!i)return null;const a=null!==i&&void 0!==i&&null!==(n=i.data)&&void 0!==n&&n.selection?"Selected report text":"Report";return(0,b.jsx)(J.A,{title:a,gap:2,padding:[3],children:(0,b.jsxs)(c.Flex,{column:!0,gap:2,children:[null!==o&&void 0!==o&&o.name?(0,b.jsx)(W,{label:"Report name",children:(0,b.jsx)(c.Text,{children:o.name})}):null!==o&&void 0!==o&&o.id?(0,b.jsx)(W,{label:"Report ID",children:(0,b.jsx)(c.Text,{children:o.id})}):null,null!==i&&void 0!==i&&null!==(t=i.data)&&void 0!==t&&t.selection?(0,b.jsx)(W,{label:"Text",children:(0,b.jsx)(X,{children:i.data.selection.length<=100?i.data.selection:"".concat(i.data.selection.slice(0,100),"...")})}):null]})})},$={report:e=>{let{conversationId:n,subject:t={},subjects:o=[]}=e;const{id:a,type:s,data:r}=t,{loaded:A,report:l,error:d}=(0,q.ef)({id:null===r||void 0===r?void 0:r.id}),u=(0,V.A)({showLoading:!0}),h=(0,i.useMemo)((()=>d?"-":A?null!==r&&void 0!==r&&r.selection?r.selection.length<=20?r.selection:"".concat(r.selection.slice(0,20),"..."):null!==l&&void 0!==l&&l.name?l.name:a||"":"Loading report..."),[A,l,d,r,a]),m=(0,i.useCallback)((()=>{u(n)}),[n,u]),g=K({id:n,onSuccess:m}),p=(0,i.useCallback)((()=>{a&&g({subjects:o.filter((e=>e.id!==a))})}),[a,o,g]);return d?(0,b.jsx)(c.Text,{children:d}):(0,b.jsx)(G,{type:s,label:h,tooltip:()=>(0,b.jsx)(Z,{report:l,subject:t}),onDeleteConfirmation:p})}},ee=e=>{let{id:n,subjects:t=[]}=e;return t.length?(0,b.jsx)(c.Flex,{alignItems:"center",gap:2,flexWrap:!0,children:t.map(((e,o)=>{const i=$[e.type];return i?(0,b.jsx)(i,{conversationId:n,subject:e,subjects:t},o):null}))}):null};var ne=t(3098),te=t(427);const oe=["id","disabled","onClose"],ie=["onClick","disabled"];function ae(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function se(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?ae(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):ae(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const re=(0,A.default)(c.Flex).attrs((e=>se({column:!0,border:{side:"all",color:"border"},round:.5},e))).withConfig({displayName:"threadHeader__MenuContainer",componentId:"sc-149fqoy-0"})([""]),Ae=(0,A.default)(c.Flex).attrs((e=>se({alignItems:"center",gap:2,padding:[2,3.5],cursor:"pointer"},e))).withConfig({displayName:"threadHeader__StyledFlex",componentId:"sc-149fqoy-1"})(["&:hover{background-color:",";}"],(0,c.getColor)("mainBackground")),ce=e=>{let{id:n,disabled:t,onClose:o}=e,a=(0,r.A)(e,oe);const s=(0,V.A)({showLoading:!0}),A=(0,i.useCallback)((()=>{n&&s(n),"function"===typeof 
o&&o()}),[n,s,o]);return(0,b.jsxs)(Ae,se(se({onClick:A,disabled:t},a),{},{children:[(0,b.jsx)(c.Icon,{name:"refresh",color:"text",width:"14px",height:"14px"}),(0,b.jsx)(c.TextBig,{children:"Reload conversation"})]}))},le=e=>{let{onClick:n,disabled:t}=e,o=(0,r.A)(e,ie);return(0,b.jsxs)(Ae,se(se({onClick:n,disabled:t},o),{},{children:[(0,b.jsx)(c.Icon,{name:"trashcan",color:"text",width:"14px",height:"14px"}),(0,b.jsx)(c.TextBig,{children:"Delete conversation"})]}))},de=e=>{let{id:n,title:t,canChat:o}=e;const a=(0,i.useRef)(),{resetActiveConversation:s}=(0,h.A)(),[r,A,,l]=(0,D.A)(),d=(0,i.useCallback)((()=>{l(),s()}),[l,s]);return(0,b.jsxs)(b.Fragment,{children:[(0,b.jsx)(c.Flex,{ref:a,children:(0,b.jsx)(c.Icon,{name:"chevron_left",size:"small",color:"textLite",rotate:r?1:3,onClick:A,cursor:"pointer"})}),a.current&&r?(0,b.jsx)(c.Drop,{"data-testid":"chat-thread-header-delete-drop",width:60,target:a.current,align:{top:"bottom",left:"left"},background:"darkBackground",margin:[2,0,0],round:!0,dataDrop:"chat-thread-header-delete-drop",onClickOutside:l,onEsc:l,zIndex:100,children:(0,b.jsxs)(re,{children:[(0,b.jsx)(ce,{id:n,disabled:!o,onClose:l}),(0,b.jsx)(P,{id:n,title:t,disabled:!o,Accessor:le,onSuccess:d,confirmationDialogProps:{"data-drop":"chat-thread-header-delete-drop"}})]})}):null]})},ue=e=>{let{metadata:n}=e;const{locale:t}=(0,te.bO)(),{credits:o}=(0,ne.A)(),{usage:a}=n||{},{amountMicrocredits:s}=a||{},r=(0,i.useMemo)((()=>new Intl.NumberFormat(t,{maximumFractionDigits:4})),[t]);return o>.9||!s?null:(0,b.jsx)(c.Flex,{padding:[1,4],border:{side:"all",color:"primaryAI"},round:4,children:(0,b.jsxs)(c.Text,{color:"primaryAI",children:["Used credits:"," ",(0,b.jsx)(c.Text,{strong:!0,color:"primaryAI",children:r.format(s/1e6)})]})})},he=e=>{let{id:n,title:t,metadata:o,subjects:i=[],canChat:a}=e;return(0,b.jsxs)(c.Flex,{column:!0,gap:2,padding:[0,0,4,0],children:[(0,b.jsxs)(c.Flex,{"data-testid":"chat-ai-modal-thread-header-container",alignItems:"center",justifyContent:"between",children:[(0,b.jsxs)(c.Flex,{alignItems:"center",gap:2,children:[(0,b.jsx)(c.TextBig,{color:"primaryAI",children:t}),(0,b.jsx)(de,{id:n,title:t,canChat:a})]}),(0,b.jsx)(ue,{metadata:o})]}),(0,b.jsx)(ee,{id:n,subjects:i})]})};t(27495),t(42762),t(25509),t(65223),t(60321),t(41927),t(11632),t(64377),t(66771),t(12516),t(68931),t(52514),t(35694),t(52774),t(49536),t(21926),t(94483),t(16215);const me=(0,A.keyframes)(["0%,100%{opacity:0;transform:translateY(0) scale(0);}15%{opacity:0.8;transform:scale(1);}85%{opacity:0.8;}100%{opacity:0;transform:translateY(-120px) scale(0);}"]),ge=(0,A.keyframes)(["0%{transform:translate(-50%,-50%) scale(0);opacity:0.6;}100%{transform:translate(-50%,-50%) scale(1);opacity:0;}"]),pe=(0,A.keyframes)(["0%,100%{stroke:var(--strokeHighlight);stroke-opacity:0.6;}50%{stroke:var(--strokeHighlight);stroke-opacity:1;}"]),fe=A.default.div.withConfig({displayName:"netdataIcon__Container",componentId:"sc-13r4hwb-0"})(["position:relative;width:88px;height:auto;overflow:hidden;--strokeMain:",";--strokeHighlight:",";--aiParticleMain:",";"],(()=>(0,c.getColor)("strokeMain")),(()=>(0,c.getColor)("strokeHighlight")),(()=>(0,c.getColor)("aiParticleMain"))),ye=A.default.svg.withConfig({displayName:"netdataIcon__CircuitSvg",componentId:"sc-13r4hwb-1"})(["width:100%;height:100%;"]),be=A.default.path.withConfig({displayName:"netdataIcon__LogoBorder",componentId:"sc-13r4hwb-2"})(["animation:"," 3s ease-in-out 
infinite;"],pe),Ee=A.default.div.withConfig({displayName:"netdataIcon__Particle",componentId:"sc-13r4hwb-3"})(["position:absolute;width:3px;height:3px;background:var(--strokeHighlight);border-radius:50%;opacity:0;box-shadow:0 0 6px var(--strokeMain);animation:"," 8s ease-in-out infinite;"," ",""],me,(e=>{let{$delay:n}=e;return n&&(0,A.css)(["animation-delay:","s;"],n)}),(e=>{let{$left:n}=e;return n&&(0,A.css)(["left:","%;"],n)})),we=(A.default.div.withConfig({displayName:"netdataIcon__EnergyWave",componentId:"sc-13r4hwb-4"})(["position:absolute;top:50%;left:55%;width:75px;height:75px;border:2px solid var(--strokeMain);border-radius:50%;transform:translate(-50%,-50%) scale(0);opacity:0;animation:"," 4s ease-out infinite;&:nth-child(2){animation-delay:1.3s;}&:nth-child(3){animation-delay:2.6s;}"],ge),A.default.canvas.withConfig({displayName:"netdataIcon__PulseCanvas",componentId:"sc-13r4hwb-5"})(["position:absolute;top:0;left:0;width:100%;height:100%;pointer-events:none;"])),Be=()=>{const e=(0,i.useRef)(null),n=(0,i.useRef)([]),t=(0,i.useRef)(new Set),o=(0,i.useRef)(null),a=(0,i.useRef)([]),s=((0,i.useRef)(null),(0,i.useRef)(null));return(0,i.useEffect)((()=>{var i;const r=e.current;if(!r)return;const A=null===(i=r.parentElement)||void 0===i?void 0:i.querySelector("svg");if(!A)return;const c=r.closest("div");s.current=((e,n)=>{const t=getComputedStyle(e).getPropertyValue(n).trim();if(t.startsWith("#")){const e=t.slice(1),n=parseInt(e.substr(0,2),16),o=parseInt(e.substr(2,2),16),i=parseInt(e.substr(4,2),16);return"".concat(n,", ").concat(o,", ").concat(i)}const o=t.match(/\d+/g);return o?o.slice(0,3).join(", "):"14, 182, 240"})(c,"--aiParticleMain");const l=A.getBoundingClientRect();r.width=2*l.width,r.height=2*l.height,r.style.width=l.width+"px",r.style.height=l.height+"px",r.getContext("2d").scale(2,2);const d=Array.from(document.querySelectorAll(".circuit-path"));a.current=d;const u=()=>{(()=>{const e=A.getBoundingClientRect().width/189,t=A.getBoundingClientRect().height/154;for(let o=n.current.length-1;o>=0;o--){const i=n.current[o],a=i.targetX-i.x,s=i.targetY-i.y,r=Math.sqrt(a*a+s*s);if(r>1)i.x+=a/r*i.speed,i.y+=s/r*i.speed;else{const n=d[Math.floor(Math.random()*d.length)].getBBox();i.targetX=(n.x+n.width/2)*e,i.targetY=(n.y+n.height/2)*t}i.life-=i.decay,i.life<=0&&n.current.splice(o,1)}})(),(()=>{const e=r.getContext("2d");e.clearRect(0,0,r.width,r.height);const t=s.current;for(const o of n.current){const n=o.life,i=e.createRadialGradient(o.x,o.y,0,o.x,o.y,5*o.size);i.addColorStop(0,"rgba(".concat(t,", ").concat(n,")")),i.addColorStop(.3,"rgba(".concat(t,", ").concat(.5*n,")")),i.addColorStop(1,"rgba(".concat(t,", 0)")),e.beginPath(),e.arc(o.x,o.y,5*o.size,0,2*Math.PI),e.fillStyle=i,e.fill(),e.beginPath(),e.arc(o.x,o.y,o.size,0,2*Math.PI),e.fillStyle="rgba(".concat(t,", ").concat(n,"))"),e.fill()}})(),o.current=requestAnimationFrame(u)};u();const h=setInterval((()=>{n.current.length<20&&(()=>{const e=d.length,t=Math.floor(Math.random()*e),o=d[t];if(!o)return;const i=o.getBBox(),a=A.getBoundingClientRect().width/189,s=A.getBoundingClientRect().height/154,r=(i.x+i.width/2)*a,c=(i.y+i.height/2)*s;n.current.push({x:r,y:c,targetX:r+10*(Math.random()-.5),targetY:c+10*(Math.random()-.5),life:1,decay:.05,speed:.025+1*Math.random(),size:.05+3*Math.random(),pathIndex:t}),o.style.fill="var(--strokeHighlight)",o.style.opacity="1",o.style.filter="drop-shadow(0 0 6px 
var(--strokeHighlight))",setTimeout((()=>{o.style.fill="",o.style.opacity="",o.style.filter=""}),200+400*Math.random())})()}),100),m=setInterval((()=>{(()=>{const e=d.length,n=Math.floor(5*Math.random())+2;for(let o=0;o<n;o++){const n=Math.floor(Math.random()*e),o=d[n];o&&!t.current.has(n)&&(t.current.add(n),o.style.fill="var(--strokeHighlight)",o.style.opacity="0.9",o.style.filter="drop-shadow(0 0 4px var(--strokeHighlight))",setTimeout((()=>{o&&(o.style.fill="",o.style.opacity="",o.style.filter=""),t.current.delete(n)}),150+500*Math.random()))}})()}),130);return()=>{o.current&&cancelAnimationFrame(o.current),clearInterval(h),clearInterval(m)}}),[]),(0,b.jsx)(we,{ref:e})},Ce=()=>(0,b.jsxs)(fe,{children:[(0,b.jsxs)(ye,{viewBox:"0 0 189 154",fill:"none",xmlns:"http://www.w3.org/2000/svg",children:[(0,b.jsxs)("defs",{children:[(0,b.jsx)("clipPath",{id:"logoClipPath",children:(0,b.jsx)("path",{d:"M110.907 0.5L111.91 0.508789C153.955 1.13032 187.938 35.823 188.493 78.7686L188.5 79.793C188.431 120.554 156.111 153.5 116.26 153.5H78.4404L0.814453 0.5H110.907Z"})}),(0,b.jsxs)("filter",{id:"glow",x:"-50%",y:"-50%",width:"200%",height:"200%",children:[(0,b.jsx)("feGaussianBlur",{stdDeviation:"2",result:"coloredBlur"}),(0,b.jsxs)("feMerge",{children:[(0,b.jsx)("feMergeNode",{in:"coloredBlur"}),(0,b.jsx)("feMergeNode",{in:"SourceGraphic"})]})]}),(0,b.jsx)("mask",{id:"mask0_332_2837",style:{maskType:"alpha"},maskUnits:"userSpaceOnUse",x:"0",y:"0",width:"189",height:"154",children:(0,b.jsx)("path",{d:"M110.907 0.5L111.91 0.508789C153.955 1.13032 187.938 35.823 188.493 78.7686L188.5 79.793C188.431 120.554 156.111 153.5 116.26 153.5H78.4404L0.814453 0.5H110.907Z",fill:"white",stroke:"#56FF99"})})]}),(0,b.jsx)(be,{d:"M110.907 0.5L111.91 0.508789C153.955 1.13032 187.938 35.823 188.493 78.7686L188.5 79.793C188.431 120.554 156.111 153.5 116.26 153.5H78.4404L0.814453 0.5H110.907Z",fill:"none",stroke:"#56FF99",strokeWidth:"1"}),(0,b.jsx)("g",{mask:"url(#mask0_332_2837)",children:(0,b.jsxs)("g",{id:"circuitPaths",style:{transform:"translate(0, -22px) scale(1.1)"},children:[(0,b.jsx)("path",{className:"circuit-path","data-id":"1",d:"M83 147.5C84.1046 147.5 85 148.395 85 149.5H92.5C93.8806 149.5 94.9998 150.619 95 152V161.77C95.5973 162.115 96 162.76 96 163.5C96 164.605 95.1046 165.5 94 165.5C92.8954 165.5 92 164.605 92 163.5C92 162.395 92.8954 161.5 94 161.5V152C93.9998 151.172 93.3283 150.5 92.5 150.5H84.7305C84.3845 151.097 83.7399 151.5 83 151.5C81.8954 151.5 81 150.605 81 149.5C81 148.395 81.8954 147.5 83 147.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"2",d:"M73 131.5C74.1046 131.5 75 132.395 75 133.5C75 134.432 74.3622 135.211 73.5 135.434V158.565C74.3623 158.788 75 159.568 75 160.5C75 161.605 74.1046 162.5 73 162.5C71.8954 162.5 71 161.605 71 160.5C71 159.568 71.6377 158.788 72.5 158.565V135.434C71.6378 135.211 71 134.432 71 133.5C71 132.395 71.8954 131.5 73 131.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"3",d:"M103 148.5C104.105 148.5 105 149.395 105 150.5C105 151.432 104.362 152.211 103.5 152.434V159.796C103.5 160.989 104.826 161.704 105.823 161.05L118.449 152.763C118.169 152.418 118 151.979 118 151.5C118 150.395 118.895 149.5 120 149.5C121.105 149.5 122 150.395 122 151.5C122 152.605 121.105 153.5 120 153.5C119.761 153.5 119.533 153.456 119.32 153.379C119.305 153.392 119.291 153.407 119.274 153.418L106.372 161.886C104.71 162.976 102.5 161.784 102.5 159.796V152.434C101.638 152.211 101 151.432 101 150.5C101 149.395 101.895 
148.5 103 148.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"4",d:"M50 127.5C51.1046 127.5 52 128.395 52 129.5C52 129.691 51.9709 129.875 51.9209 130.05L64.9844 139.496C65.6343 139.966 66.0195 140.72 66.0195 141.522V148C66.0193 149.38 64.9 150.5 63.5195 150.5H49.7305C49.3845 151.097 48.7399 151.5 48 151.5C46.8954 151.5 46 150.605 46 149.5C46 148.395 46.8954 147.5 48 147.5C49.1046 147.5 50 148.395 50 149.5H63.5195C64.3477 149.5 65.0193 148.828 65.0195 148V141.522C65.0195 141.041 64.7883 140.589 64.3984 140.307L51.4121 130.915C51.0503 131.276 50.5515 131.5 50 131.5C48.8954 131.5 48 130.605 48 129.5C48 128.395 48.8954 127.5 50 127.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"5",d:"M135 130.5C136.105 130.5 137 131.395 137 132.5C137 133.605 136.105 134.5 135 134.5C134.513 134.5 134.068 134.325 133.721 134.036L114.907 144.898C114.967 145.088 115 145.29 115 145.5C115 146.605 114.105 147.5 113 147.5C111.895 147.5 111 146.605 111 145.5C111 144.395 111.895 143.5 113 143.5C113.533 143.5 114.016 143.71 114.375 144.051L133.136 133.22C133.049 132.996 133 132.754 133 132.5C133 131.395 133.895 130.5 135 130.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"6",d:"M142 124.5C143.105 124.5 144 125.395 144 126.5C144 127.432 143.362 128.211 142.5 128.434V135.796C142.5 136.722 141.988 137.573 141.17 138.006L126.999 145.508C126.995 146.609 126.102 147.5 125 147.5C123.895 147.5 123 146.605 123 145.5C123 144.395 123.895 143.5 125 143.5C125.746 143.5 126.394 143.909 126.738 144.514L140.702 137.122C141.193 136.862 141.5 136.351 141.5 135.796V128.434C140.638 128.211 140 127.432 140 126.5C140 125.395 140.895 124.5 142 124.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"7",d:"M23 114.5C24.1046 114.5 25 115.395 25 116.5C25 117.24 24.5971 117.884 24 118.229V121.5C23.9998 122.881 22.8806 124 21.5 124H15C14.1717 124 13.5002 124.672 13.5 125.5V125.906C13.5003 126.417 13.7609 126.893 14.1914 127.169L37.5537 142.121C37.918 141.739 38.4306 141.5 39 141.5C40.1046 141.5 41 142.395 41 143.5C41 144.605 40.1046 145.5 39 145.5C37.8954 145.5 37 144.605 37 143.5C37 143.326 37.0245 143.157 37.0664 142.996L13.6523 128.012C12.9346 127.552 12.5003 126.758 12.5 125.906V125.5C12.5002 124.119 13.6194 123 15 123H21.5C22.3283 123 22.9998 122.328 23 121.5V118.5C21.8954 118.5 21 117.605 21 116.5C21 115.395 21.8954 114.5 23 114.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"8",d:"M34 121.5C35.1046 121.5 36 122.395 36 123.5C36 124.432 35.3622 125.211 34.5 125.434V129.012C34.5001 129.479 34.718 129.919 35.0889 130.203L50.126 141.702C50.3902 141.573 50.6864 141.5 51 141.5C52.1046 141.5 53 142.395 53 143.5C53 144.605 52.1046 145.5 51 145.5C49.8954 145.5 49 144.605 49 143.5C49 143.08 49.1293 142.69 49.3506 142.368L34.4814 130.998C33.863 130.525 33.5001 129.79 33.5 129.012V125.434C32.6378 125.211 32 124.432 32 123.5C32 122.395 32.8954 121.5 34 121.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"9",d:"M152 114.5C153.079 114.5 153.956 115.355 153.995 116.425L166.587 122.504C167.451 122.921 168 123.796 168 124.756V128.945C168 129.77 167.593 130.543 166.912 131.009L149.995 142.582C149.952 143.649 149.077 144.5 148 144.5C146.895 144.5 146 143.605 146 142.5C146 141.395 146.895 140.5 148 140.5C148.755 140.5 149.413 140.919 149.753 141.537L166.347 130.184C166.755 129.904 167 129.44 167 128.945V124.756C167 124.18 166.67 123.656 166.152 
123.405L153.771 117.427C153.436 118.064 152.77 118.5 152 118.5C150.895 118.5 150 117.605 150 116.5C150 115.395 150.895 114.5 152 114.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"10",d:"M124 128.5C125.105 128.5 126 129.395 126 130.5C126 131.605 125.105 132.5 124 132.5C123.445 132.5 122.943 132.273 122.581 131.908L106.693 140.688C106.323 140.893 105.907 141 105.484 141H84.9346C84.7123 141.862 83.9317 142.5 83 142.5C81.8954 142.5 81 141.605 81 140.5C81 139.395 81.8954 138.5 83 138.5C83.9317 138.5 84.7123 139.138 84.9346 140H105.484C105.738 140 105.988 139.936 106.21 139.813L122.076 131.044C122.027 130.871 122 130.689 122 130.5C122 129.395 122.895 128.5 124 128.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"11",d:"M150 121.5C151.075 121.5 151.949 122.348 151.995 123.412L157.71 126.08C158.489 126.444 159.026 127.182 159.133 128.035L159.329 129.605C159.455 130.613 158.959 131.597 158.074 132.095L151.999 135.512C151.993 136.611 151.101 137.5 150 137.5C148.895 137.5 148 136.605 148 135.5C148 134.395 148.895 133.5 150 133.5C150.745 133.5 151.393 133.908 151.737 134.512L157.584 131.223C158.115 130.924 158.412 130.334 158.337 129.729L158.141 128.159C158.077 127.647 157.754 127.205 157.287 126.986L151.776 124.414C151.444 125.058 150.775 125.5 150 125.5C148.895 125.5 148 124.605 148 123.5C148 122.395 148.895 121.5 150 121.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"12",d:"M129 121.5C130.105 121.5 131 122.395 131 123.5C131 124.605 130.105 125.5 129 125.5C128.068 125.5 127.288 124.862 127.065 124H114.285C113.907 124 113.542 124.144 113.265 124.401L107.416 129.832C106.954 130.261 106.346 130.5 105.715 130.5H84.7305C84.3845 131.097 83.7399 131.5 83 131.5C81.8954 131.5 81 130.605 81 129.5C81 128.395 81.8954 127.5 83 127.5C84.1046 127.5 85 128.395 85 129.5H105.715C106.093 129.5 106.458 129.357 106.735 129.1L112.584 123.668C113.046 123.239 113.654 123 114.285 123H127.065C127.288 122.138 128.068 121.5 129 121.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"13",d:"M170 112.5C171.105 112.5 172 113.395 172 114.5H183C184.381 114.5 185.5 115.619 185.5 117V117.982C185.5 118.782 185.117 119.534 184.471 120.004L174.921 126.947C174.971 127.123 175 127.308 175 127.5C175 128.605 174.105 129.5 173 129.5C171.895 129.5 171 128.605 171 127.5C171 126.395 171.895 125.5 173 125.5C173.551 125.5 174.049 125.723 174.41 126.083L183.882 119.195C184.27 118.913 184.5 118.462 184.5 117.982V117C184.5 116.172 183.828 115.5 183 115.5H171.73C171.385 116.097 170.74 116.5 170 116.5C168.895 116.5 168 115.605 168 114.5C168 113.395 168.895 112.5 170 112.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"14",d:"M74 121.5C75.1046 121.5 76 122.395 76 123.5C76 124.605 75.1046 125.5 74 125.5C73.0683 125.5 72.2877 124.862 72.0654 124H48.9346C48.7123 124.862 47.9317 125.5 47 125.5C45.8954 125.5 45 124.605 45 123.5C45 122.395 45.8954 121.5 47 121.5C47.9317 121.5 48.7123 122.138 48.9346 123H72.0654C72.2877 122.138 73.0683 121.5 74 121.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"15",d:"M109 108.5C110.105 108.5 111 109.395 111 110.5C111 111.605 110.105 112.5 109 112.5C108.603 112.5 108.233 112.383 107.922 112.183L98.833 119C98.3999 119.324 97.8741 119.5 97.333 119.5H80.7305C80.3845 120.097 79.7399 120.5 79 120.5C77.8954 120.5 77 119.605 77 118.5C77 117.395 77.8954 116.5 79 116.5C80.1046 116.5 81 117.395 81 118.5H97.333C97.6571 
118.5 97.9738 118.395 98.233 118.2L107.237 111.446C107.086 111.165 107 110.842 107 110.5C107 109.395 107.895 108.5 109 108.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"16",d:"M31 106.5C32.1046 106.5 33 107.395 33 108.5C33 109.605 32.1046 110.5 31 110.5C30.2601 110.5 29.6155 110.097 29.2695 109.5H15.5C14.6717 109.5 14.0002 110.172 14 111V114.5C13.9998 115.881 12.8806 117 11.5 117H4C3.97814 117 3.95668 116.996 3.93555 116.993C3.71564 117.859 2.93411 118.5 2 118.5C0.89543 118.5 0 117.605 0 116.5C0 115.395 0.89543 114.5 2 114.5C2.93384 114.5 3.71527 115.141 3.93555 116.006C3.95668 116.003 3.97814 116 4 116H11.5C12.3283 116 12.9998 115.328 13 114.5V111C13.0002 109.619 14.1194 108.5 15.5 108.5H29C29 107.395 29.8954 106.5 31 106.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"17",d:"M65 99.5C66.1046 99.5 67 100.395 67 101.5C67 102.432 66.3622 103.211 65.5 103.434V114.5C65.4998 115.881 64.3806 117 63 117H31.9346C31.7123 117.862 30.9317 118.5 30 118.5C28.8954 118.5 28 117.605 28 116.5C28 115.395 28.8954 114.5 30 114.5C30.9317 114.5 31.7123 115.138 31.9346 116H63C63.8283 116 64.4998 115.328 64.5 114.5V103.434C63.6378 103.211 63 102.432 63 101.5C63 100.395 63.8954 99.5 65 99.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"18",d:"M144 114.5C145.105 114.5 146 115.395 146 116.5C146 117.605 145.105 118.5 144 118.5C143.066 118.5 142.283 117.859 142.063 116.993C142.043 116.996 142.022 117 142 117H115.935C115.712 117.862 114.932 118.5 114 118.5C112.895 118.5 112 117.605 112 116.5C112 115.395 112.895 114.5 114 114.5C114.932 114.5 115.712 115.138 115.935 116H142C142.022 116 142.043 116.003 142.063 116.006C142.284 115.141 143.066 114.5 144 114.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"19",d:"M106 98.5C107.105 98.5 108 99.3954 108 100.5C108 101.605 107.105 102.5 106 102.5C105.606 102.5 105.239 102.384 104.93 102.188L92.709 109.171C92.3311 109.387 91.9041 109.5 91.469 109.5H77.7305C77.3845 110.097 76.7399 110.5 76 110.5C74.8954 110.5 74 109.605 74 108.5C74 107.395 74.8954 106.5 76 106.5C77.1046 106.5 78 107.395 78 108.5H91.469C91.7299 108.5 91.9858 108.432 92.213 108.303L104.231 101.435C104.084 101.156 104 100.838 104 100.5C104 99.3954 104.895 98.5 106 98.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"20",d:"M114 92.5C115.105 92.5 116 93.3954 116 94.5C116 95.6046 115.105 96.5 114 96.5V107C114 107.828 114.672 108.5 115.5 108.5H132C132 107.395 132.895 106.5 134 106.5C135.105 106.5 136 107.395 136 108.5C136 109.605 135.105 110.5 134 110.5C133.26 110.5 132.615 110.097 132.27 109.5H115.5C114.119 109.5 113 108.381 113 107V96.2285C112.403 95.8843 112 95.2399 112 94.5C112 93.3954 112.895 92.5 114 92.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"21",d:"M183 99.5C184.105 99.5 185 100.395 185 101.5C185 102.605 184.105 103.5 183 103.5V107C183 108.381 181.881 109.5 180.5 109.5H158.73C158.385 110.097 157.74 110.5 157 110.5C155.895 110.5 155 109.605 155 108.5C155 107.395 155.895 106.5 157 106.5C158.105 106.5 159 107.395 159 108.5H180.5C181.328 108.5 182 107.828 182 107V103.229C181.403 102.884 181 102.24 181 101.5C181 100.395 181.895 99.5 183 99.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"22",d:"M3 69.5C4.10457 69.5 5 70.3954 5 71.5H11.5C12.8806 71.5 13.9998 72.6195 14 74V84.5C13.9998 85.8805 12.8806 87 11.5 87H6C5.17174 87 4.50016 87.6718 4.5 
88.5V103.565C5.36226 103.788 6 104.568 6 105.5C6 106.605 5.10457 107.5 4 107.5C2.89543 107.5 2 106.605 2 105.5C2 104.568 2.63774 103.788 3.5 103.565V88.5C3.50016 87.1195 4.61937 86 6 86H11.5C12.3283 86 12.9998 85.3283 13 84.5V74C12.9998 73.1718 12.3283 72.5 11.5 72.5H4.73047C4.38452 73.0973 3.73985 73.5 3 73.5C1.89543 73.5 1 72.6046 1 71.5C1 70.3954 1.89543 69.5 3 69.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"23",d:"M57 92.5C58.1046 92.5 59 93.3954 59 94.5C59 95.4316 58.3622 96.2113 57.5 96.4336V104C57.4998 105.381 56.3806 106.5 55 106.5H42.7305C42.3845 107.097 41.7399 107.5 41 107.5C39.8954 107.5 39 106.605 39 105.5C39 104.395 39.8954 103.5 41 103.5C42.1046 103.5 43 104.395 43 105.5H55C55.8283 105.5 56.4998 104.828 56.5 104V96.4336C55.6378 96.2113 55 95.4316 55 94.5C55 93.3954 55.8954 92.5 57 92.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"24",d:"M9 89.5C10.1046 89.5 11 90.3954 11 91.5C11 92.4316 10.3622 93.2113 9.5 93.4336V99C9.50016 99.8282 10.1717 100.5 11 100.5H32C32 99.3954 32.8954 98.5 34 98.5C35.1046 98.5 36 99.3954 36 100.5C36 101.605 35.1046 102.5 34 102.5C33.2601 102.5 32.6155 102.097 32.2695 101.5H11C9.61937 101.5 8.50016 100.381 8.5 99V93.4336C7.63774 93.2113 7 92.4316 7 91.5C7 90.3954 7.89543 89.5 9 89.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"25",d:"M106 85.5C107.105 85.5 108 86.3954 108 87.5C108 88.4316 107.362 89.2113 106.5 89.4336V92.5C106.5 93.8805 105.381 95 104 95H92.2461C91.8851 95 91.5359 95.1313 91.2637 95.3672L84.8916 100.89C84.4368 101.283 83.8547 101.5 83.2539 101.5H75.7305C75.3845 102.097 74.7399 102.5 74 102.5C73.2254 102.5 72.5549 102.059 72.2227 101.415C72.0887 101.325 72.0001 101.173 72 101C72 100.941 72.0115 100.885 72.0303 100.832C72.0122 100.724 72 100.613 72 100.5C72 99.3954 72.8954 98.5 74 98.5C75.1046 98.5 76 99.3954 76 100.5H83.2539C83.6149 100.5 83.9641 100.37 84.2363 100.134L90.6084 94.6113C91.0632 94.2173 91.6453 94 92.2461 94H104C104.828 94 105.5 93.3283 105.5 92.5V89.4336C104.638 89.2113 104 88.4316 104 87.5C104 86.3954 104.895 85.5 106 85.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"26",d:"M153 98.5C154.105 98.5 155 99.3954 155 100.5C155 101.605 154.105 102.5 153 102.5C152.26 102.5 151.615 102.097 151.27 101.5H125.73C125.385 102.097 124.74 102.5 124 102.5C122.895 102.5 122 101.605 122 100.5C122 99.3954 122.895 98.5 124 98.5C125.105 98.5 126 99.3954 126 100.5H151C151 99.3954 151.895 98.5 153 98.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"27",d:"M147 77.5C147.932 77.5 148.712 78.1377 148.935 79H158.5C159.881 79 161 80.1195 161 81.5V95C161 95.8283 161.672 96.5 162.5 96.5H168C168 95.3954 168.895 94.5 170 94.5C171.105 94.5 172 95.3954 172 96.5C172 97.6046 171.105 98.5 170 98.5C169.26 98.5 168.615 98.0973 168.27 97.5H162.5C161.119 97.5 160 96.3805 160 95V81.5C160 80.6718 159.328 80 158.5 80H148.935C148.712 80.8623 147.932 81.5 147 81.5C145.895 81.5 145 80.6046 145 79.5C145 78.3954 145.895 77.5 147 77.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"28",d:"M17 92.5C18.1046 92.5 19 93.3954 19 94.5C19 95.6046 18.1046 96.5 17 96.5C15.8954 96.5 15 95.6046 15 94.5C15 93.3954 15.8954 92.5 17 92.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"29",d:"M47 92.5C48.1046 92.5 49 93.3954 49 94.5C49 95.6046 48.1046 96.5 47 96.5C46.0683 96.5 45.2877 95.8623 45.0654 95H19.5C19.224 95 19.0002 
94.776 19 94.5C19.0002 94.224 19.224 94 19.5 94H45.0654C45.2877 93.1377 46.0683 92.5 47 92.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"30",d:"M114 84.5C114.932 84.5 115.712 85.1377 115.935 86H128C129.381 86 130.5 87.1195 130.5 88.5V92.5C130.5 93.3283 131.172 94 132 94H144.065C144.288 93.1377 145.068 92.5 146 92.5C147.105 92.5 148 93.3954 148 94.5C148 95.6046 147.105 96.5 146 96.5C145.068 96.5 144.288 95.8623 144.065 95H132C130.619 95 129.5 93.8805 129.5 92.5V88.5C129.5 87.6718 128.828 87 128 87H115.935C115.712 87.8623 114.932 88.5 114 88.5C112.895 88.5 112 87.6046 112 86.5C112 85.3954 112.895 84.5 114 84.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"31",d:"M53 16.5C54.1046 16.5 55 17.3954 55 18.5C55 19.6046 54.1046 20.5 53 20.5C52.3653 20.5 51.801 20.2033 51.4346 19.7422C51.3933 19.8161 51.3347 19.882 51.2568 19.9287L24.7285 35.8467C24.2768 36.1178 24 36.606 24 37.1328V41.5C24.0002 42.3283 24.6717 43 25.5 43H33C34.3806 43 35.4998 44.1195 35.5 45.5V51C35.5002 51.8282 36.1717 52.5 37 52.5H63.4912C64.8855 52.5 66.0105 53.6408 65.9912 55.0352L65.5127 89.5684C66.3686 89.7952 67 90.5729 67 91.5C67 92.6046 66.1046 93.5 65 93.5C63.8954 93.5 63 92.6046 63 91.5C63 90.5637 63.6439 89.7793 64.5127 89.5625L64.9912 55.0215C65.0028 54.1851 64.3278 53.5 63.4912 53.5H37C35.6194 53.5 34.5002 52.3805 34.5 51V45.5C34.4998 44.6718 33.8283 44 33 44H25.5C24.1194 44 23.0002 42.8805 23 41.5V37.1328C23 36.2547 23.461 35.4411 24.2139 34.9893L50.7432 19.0713C50.8446 19.0106 50.9587 18.9919 51.0674 19.0068C51.025 18.8448 51 18.6753 51 18.5C51 17.3954 51.8954 16.5 53 16.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"32",d:"M104 79.5C105.105 79.5 106 80.3954 106 81.5C106 82.6046 105.105 83.5 104 83.5C103.068 83.5 102.288 82.8623 102.065 82H84.5C83.6717 82 83 82.6718 83 83.5V88.5C83 89.8805 81.8806 91 80.5 91H75.9346C75.7123 91.8623 74.9317 92.5 74 92.5C72.8954 92.5 72 91.6046 72 90.5C72 89.3954 72.8954 88.5 74 88.5C74.9317 88.5 75.7123 89.1377 75.9346 90H80.5C81.3283 90 82 89.3283 82 88.5V83.5C82 82.1195 83.1194 81 84.5 81H102.065C102.288 80.1377 103.068 79.5 104 79.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"33",d:"M170 66.5C171.105 66.5 172 67.3954 172 68.5C172 69.2399 171.597 69.8845 171 70.2295V84.5C171 85.3283 171.672 86 172.5 86H179.299C179.866 86 180.417 86.1931 180.86 86.5479L183.802 88.9014C184.136 88.6497 184.55 88.5 185 88.5C186.105 88.5 187 89.3954 187 90.5C187 91.6046 186.105 92.5 185 92.5C183.895 92.5 183 91.6046 183 90.5C183 90.2082 183.063 89.9321 183.176 89.6816L180.235 87.3291C179.969 87.1157 179.639 87 179.299 87H172.5C171.119 87 170 85.8805 170 84.5V70.5C168.895 70.5 168 69.6046 168 68.5C168 67.3954 168.895 66.5 170 66.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"34",d:"M23 77.5C23.9317 77.5 24.7123 78.1377 24.9346 79H45C45.8283 79 46.4998 79.6718 46.5 80.5C46.5002 80.776 46.724 81 47 81H55C56.3806 81 57.4998 82.1195 57.5 83.5V86.5654C58.3623 86.7877 59 87.5683 59 88.5C59 89.6046 58.1046 90.5 57 90.5C55.8954 90.5 55 89.6046 55 88.5C55 87.5683 55.6377 86.7877 56.5 86.5654V83.5C56.4998 82.6718 55.8283 82 55 82H47C46.1717 82 45.5002 81.3282 45.5 80.5C45.4998 80.224 45.276 80 45 80H24.9346C24.7123 80.8623 23.9317 81.5 23 81.5C21.8954 81.5 21 80.6046 21 79.5C21 78.3954 21.8954 77.5 23 77.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"35",d:"M41 84.5C42.1046 84.5 43 85.3954 
43 86.5C43 87.6046 42.1046 88.5 41 88.5C40.0683 88.5 39.2877 87.8623 39.0654 87H19.9346C19.7123 87.8623 18.9317 88.5 18 88.5C16.8954 88.5 16 87.6046 16 86.5C16 85.3954 16.8954 84.5 18 84.5C18.9317 84.5 19.7123 85.1377 19.9346 86H39.0654C39.2877 85.1377 40.0683 84.5 41 84.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"36",d:"M73 57.5C73.9317 57.5 74.7123 58.1377 74.9346 59H82C83.3806 59 84.4998 60.1195 84.5 61.5V67C84.4998 68.3805 83.3806 69.5 82 69.5H74.582C73.7301 69.5 73.049 70.209 73.083 71.0605L73.3818 78.5371C74.3037 78.7148 75 79.5259 75 80.5C75 81.6046 74.1046 82.5 73 82.5C71.8954 82.5 71 81.6046 71 80.5C71 79.6111 71.5804 78.8577 72.3828 78.5977L72.084 71.1006C72.0272 69.6819 73.1621 68.5 74.582 68.5H82C82.8283 68.5 83.4998 67.8283 83.5 67V61.5C83.4998 60.6718 82.8283 60 82 60H74.9346C74.7123 60.8623 73.9317 61.5 73 61.5C71.8954 61.5 71 60.6046 71 59.5C71 58.3954 71.8954 57.5 73 57.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"37",d:"M137 77.5C138.105 77.5 139 78.3954 139 79.5C139 80.6046 138.105 81.5 137 81.5C136.068 81.5 135.288 80.8623 135.065 80H115.935C115.712 80.8623 114.932 81.5 114 81.5C112.895 81.5 112 80.6046 112 79.5C112 78.3954 112.895 77.5 114 77.5C114.932 77.5 115.712 78.1377 115.935 79H135.065C135.288 78.1377 136.068 77.5 137 77.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"38",d:"M185 61C185.276 61 185.5 61.224 185.5 61.5V79.5C185.5 79.776 185.276 80 185 80C184.724 80 184.5 79.776 184.5 79.5V61.5C184.5 61.224 184.724 61 185 61Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"39",d:"M3 59.5C3.93166 59.5 4.71226 60.1377 4.93457 61H55C56.3806 61 57.4998 62.1195 57.5 63.5V72.5654C58.3623 72.7877 59 73.5683 59 74.5C59 75.6046 58.1046 76.5 57 76.5C55.8954 76.5 55 75.6046 55 74.5C55 73.5683 55.6377 72.7877 56.5 72.5654V63.5C56.4998 62.6718 55.8283 62 55 62H4.93457C4.71226 62.8623 3.93166 63.5 3 63.5C1.89543 63.5 1 62.6046 1 61.5C1 60.3954 1.89543 59.5 3 59.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"40",d:"M95 72.5C96.1046 72.5 97 73.3954 97 74.5C97 75.6046 96.1046 76.5 95 76.5C94.0683 76.5 93.2877 75.8623 93.0654 75H80.9346C80.7123 75.8623 79.9317 76.5 79 76.5C77.8954 76.5 77 75.6046 77 74.5C77 73.3954 77.8954 72.5 79 72.5C79.9317 72.5 80.7123 73.1377 80.9346 74H93.0654C93.2877 73.1377 94.0683 72.5 95 72.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"41",d:"M47 70.5C48.1046 70.5 49 71.3954 49 72.5C49 73.6046 48.1046 74.5 47 74.5C46.2601 74.5 45.6155 74.0973 45.2695 73.5H24.7305C24.3845 74.0973 23.7399 74.5 23 74.5C21.8954 74.5 21 73.6046 21 72.5C21 71.3954 21.8954 70.5 23 70.5C24.1046 70.5 25 71.3954 25 72.5H45C45 71.3954 45.8954 70.5 47 70.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"42",d:"M91 57.5C91.9317 57.5 92.7123 58.1377 92.9346 59H98C99.3806 59 100.5 60.1195 100.5 61.5V69.5654C101.362 69.7877 102 70.5683 102 71.5C102 72.6046 101.105 73.5 100 73.5C98.8954 73.5 98 72.6046 98 71.5C98 70.5683 98.6377 69.7877 99.5 69.5654V61.5C99.4998 60.6718 98.8283 60 98 60H92.9346C92.7123 60.8623 91.9317 61.5 91 61.5C89.8954 61.5 89 60.6046 89 59.5C89 58.3954 89.8954 57.5 91 57.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"43",d:"M124 69.5C125.105 69.5 126 70.3954 126 71.5C126 72.6046 125.105 73.5 124 73.5C123.26 73.5 122.615 73.0973 122.27 72.5H110.73C110.385 73.0973 109.74 73.5 
109 73.5C107.895 73.5 107 72.6046 107 71.5C107 70.3954 107.895 69.5 109 69.5C110.105 69.5 111 70.3954 111 71.5H122C122 70.3954 122.895 69.5 124 69.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"44",d:"M135 56.5C136.105 56.5 137 57.3954 137 58.5C137 59.4316 136.362 60.2113 135.5 60.4336V70C135.5 70.8283 136.172 71.5 137 71.5H148.576C149.866 71.5 150.554 69.979 149.702 69.0098L148.049 67.1309C146.254 65.0913 148.543 62.0548 150.998 63.2207L158.78 66.9175C159.118 66.6565 159.54 66.5 160 66.5C161.086 66.5 161.968 67.3665 161.997 68.4453L162.215 68.5488C162.464 68.667 162.57 68.9658 162.451 69.2148C162.333 69.4639 162.034 69.5701 161.785 69.4521L161.764 69.4414C161.427 70.0711 160.764 70.5 160 70.5C158.895 70.5 158 69.6046 158 68.5C158 68.2261 158.055 67.9648 158.154 67.7275L150.569 64.124C149.097 63.4237 147.723 65.2461 148.8 66.4697L150.453 68.3486C151.874 69.9636 150.727 72.5 148.576 72.5H137C135.619 72.5 134.5 71.3805 134.5 70V60.4336C133.638 60.2113 133 59.4316 133 58.5C133 57.3954 133.895 56.5 135 56.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"45",d:"M128 52.5C129.105 52.5 130 53.3954 130 54.5C130 55.6046 129.105 56.5 128 56.5V59.5C128 60.8805 126.881 62 125.5 62H109.935C109.712 62.8623 108.932 63.5 108 63.5C106.895 63.5 106 62.6046 106 61.5C106 60.3954 106.895 59.5 108 59.5C108.932 59.5 109.712 60.1377 109.935 61H125.5C126.328 61 127 60.3283 127 59.5V56.2285C126.403 55.8843 126 55.2399 126 54.5C126 53.7261 126.44 53.0546 127.083 52.7227C127.173 52.5878 127.326 52.5 127.5 52.5C127.558 52.5 127.614 52.5115 127.666 52.5303C127.775 52.5122 127.886 52.5 128 52.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"46",d:"M152.5 51C153.881 51 155 52.1195 155 53.5V56.4297C155 56.9311 155.251 57.3999 155.668 57.6777L160.777 61.084C161.007 61.2372 161.069 61.5476 160.916 61.7773C160.763 62.0071 160.452 62.0688 160.223 61.9165L155.113 58.5098C154.418 58.0461 154 57.2651 154 56.4297V53.5C154 52.6718 153.328 52 152.5 52H142C141.724 52 141.5 51.776 141.5 51.5C141.5 51.224 141.724 51 142 51H152.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"47",d:"M177.228 52.5811C177.459 52.431 177.768 52.4959 177.919 52.7275C178.069 52.9592 178.004 53.2686 177.772 53.4199L167.772 59.9199C167.541 60.07 167.232 60.0044 167.081 59.7725C166.931 59.5405 166.996 59.2318 167.228 59.0811L177.228 52.5811Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"48",d:"M118 53.5C119.105 53.5 120 54.3954 120 55.5C120 56.6046 119.105 57.5 118 57.5C117.068 57.5 116.288 56.8623 116.065 56H109.935C109.712 56.8623 108.932 57.5 108 57.5C106.895 57.5 106 56.6046 106 55.5C106 54.3954 106.895 53.5 108 53.5C108.932 53.5 109.712 54.1377 109.935 55H116.065C116.288 54.1377 117.068 53.5 118 53.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"49",d:"M176 42.5C177.105 42.5 178 43.3954 178 44.5C178 45.6046 177.105 46.5 176 46.5C175.52 46.5 175.08 46.331 174.735 46.0488L163.866 53.7861C163.951 54.0083 164 54.248 164 54.5C164 55.6046 163.105 56.5 162 56.5C160.895 56.5 160 55.6046 160 54.5C160 53.3954 160.895 52.5 162 52.5C162.49 52.5 162.938 52.6768 163.286 52.9697L174.144 45.2422C174.052 45.0132 174 44.7624 174 44.5C174 43.3954 174.895 42.5 176 42.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"50",d:"M156 19.5C156.932 19.5 157.712 20.1377 157.935 21H174.498C175.176 21.0001 175.825 21.276 176.296 
21.7637L184.799 30.5723C185.248 31.0383 185.5 31.661 185.5 32.3086V52.5654C186.362 52.7877 187 53.5683 187 54.5C187 55.6046 186.105 56.5 185 56.5C183.895 56.5 183 55.6046 183 54.5C183 53.5683 183.638 52.7877 184.5 52.5654V32.3086C184.5 31.9199 184.349 31.5462 184.079 31.2666L175.577 22.459C175.295 22.1663 174.905 22.0001 174.498 22H157.935C157.712 22.8623 156.932 23.5 156 23.5C154.895 23.5 154 22.6046 154 21.5C154 20.3954 154.895 19.5 156 19.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"51",d:"M4 17.5C5.10457 17.5 6 18.3954 6 19.5C6 20.4316 5.36226 21.2113 4.5 21.4336V51C4.50016 51.8282 5.17174 52.5 6 52.5H21C21 51.3954 21.8954 50.5 23 50.5C24.1046 50.5 25 51.3954 25 52.5C25 53.6046 24.1046 54.5 23 54.5C22.2601 54.5 21.6155 54.0973 21.2695 53.5H6C4.61937 53.5 3.50016 52.3805 3.5 51V21.4336C2.63774 21.2113 2 20.4316 2 19.5C2 18.3954 2.89543 17.5 4 17.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"52",d:"M76 38.5C77.1046 38.5 78 39.3954 78 40.5C78 41.4316 77.3622 42.2113 76.5 42.4336V51C76.4998 51.8282 77.1717 52.5 78 52.5H89.5C90.3283 52.5 91 51.8282 91 51V43.5C91 42.1195 92.1194 41 93.5 41H104.065C104.288 40.1377 105.068 39.5 106 39.5C107.105 39.5 108 40.3954 108 41.5C108 42.6046 107.105 43.5 106 43.5C105.068 43.5 104.288 42.8623 104.065 42H93.5C92.6717 42 92 42.6718 92 43.5V51C92 52.3805 90.8806 53.5 89.5 53.5H78C76.6194 53.5 75.4998 52.3805 75.5 51V42.4336C74.6378 42.2113 74 41.4316 74 40.5C74 39.3954 74.8954 38.5 76 38.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"53",d:"M135 31.5C136.105 31.5 137 32.3954 137 33.5C137 34.4316 136.362 35.2113 135.5 35.4336V49.5654C136.362 49.7877 137 50.5683 137 51.5C137 52.6046 136.105 53.5 135 53.5C133.895 53.5 133 52.6046 133 51.5C133 50.5683 133.638 49.7877 134.5 49.5654V35.4336C133.638 35.2113 133 34.4316 133 33.5C133 32.3954 133.895 31.5 135 31.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"54",d:"M124 46.5C125.105 46.5 126 47.3954 126 48.5C126 49.6046 125.105 50.5 124 50.5C123.068 50.5 122.288 49.8623 122.065 49H100.935C100.712 49.8623 99.9317 50.5 99 50.5C97.8954 50.5 97 49.6046 97 48.5C97 47.3954 97.8954 46.5 99 46.5C99.9317 46.5 100.712 47.1377 100.935 48H122.065C122.288 47.1377 123.068 46.5 124 46.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"55",d:"M13 24.5C14.1046 24.5 15 25.3954 15 26.5C15 27.4341 14.3589 28.2156 13.4932 28.4355C13.4959 28.4566 13.5 28.4782 13.5 28.5V44.5654C14.3623 44.7877 15 45.5683 15 46.5C15 47.6046 14.1046 48.5 13 48.5C11.8954 48.5 11 47.6046 11 46.5C11 45.5683 11.6377 44.7877 12.5 44.5654V28.5C12.5 28.4782 12.5031 28.4566 12.5059 28.4355C11.6406 28.2153 11 27.4338 11 26.5C11 25.3954 11.8954 24.5 13 24.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"56",d:"M61.8604 32.0469C63.5238 31.1949 65.4997 32.4026 65.5 34.2715V43.5C65.5 43.5216 65.4958 43.5426 65.4932 43.5635C66.359 43.7833 67 44.5658 67 45.5C67 46.6046 66.1046 47.5 65 47.5C63.8954 47.5 63 46.6046 63 45.5C63 44.5661 63.6405 43.7836 64.5059 43.5635C64.5032 43.5426 64.5 43.5216 64.5 43.5V34.2715C64.4997 33.1504 63.3144 32.4256 62.3164 32.9365L45.8164 41.3877C45.3156 41.6443 45.0002 42.16 45 42.7227V44.7695C45.5973 45.1152 46 45.7601 46 46.5C46 47.6046 45.1046 48.5 44 48.5C42.8954 48.5 42 47.6046 42 46.5C42 45.3954 42.8954 44.5 44 44.5V42.7227C44.0002 41.7847 44.5255 40.9257 45.3604 40.498L61.8604 
32.0469Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"57",d:"M169.597 28C170.315 28 170.998 28.3091 171.473 28.8477L175.223 33.1025C175.625 33.5591 175.847 34.1474 175.847 34.7559V38.291C175.847 39.2203 175.331 40.0732 174.508 40.5049L162.232 46.9434C161.988 47.0713 161.686 46.977 161.558 46.7324C161.429 46.4878 161.523 46.1863 161.768 46.0576L174.044 39.6191C174.537 39.3601 174.847 38.8484 174.847 38.291V34.7559C174.847 34.3908 174.713 34.0386 174.472 33.7646L170.722 29.5088C170.437 29.1857 170.027 29 169.597 29H162C161.724 29 161.5 28.776 161.5 28.5C161.5 28.224 161.724 28 162 28H169.597Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"58",d:"M156 43C156.276 43 156.5 43.224 156.5 43.5C156.5 43.776 156.276 44 156 44H142C141.724 44 141.5 43.776 141.5 43.5C141.5 43.224 141.724 43 142 43H156Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"59",d:"M130 39.5C131.105 39.5 132 40.3954 132 41.5C132 42.6046 131.105 43.5 130 43.5C129.068 43.5 128.288 42.8623 128.065 42H114.935C114.712 42.8623 113.932 43.5 113 43.5C111.895 43.5 111 42.6046 111 41.5C111 40.3954 111.895 39.5 113 39.5C113.932 39.5 114.712 40.1377 114.935 41H128.065C128.288 40.1377 129.068 39.5 130 39.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"60",d:"M53 26.5C54.1046 26.5 55 27.3954 55 28.5C55 29.6046 54.1046 30.5 53 30.5C52.6184 30.5 52.2633 30.3905 51.96 30.2051L41.874 35.8086C41.9537 36.0244 42 36.2566 42 36.5C42 37.6046 41.1046 38.5 40 38.5C38.8954 38.5 38 37.6046 38 36.5C38 35.3954 38.8954 34.5 40 34.5C40.4971 34.5 40.951 34.6823 41.3008 34.9824L51.2441 29.459C51.0883 29.1743 51 28.8474 51 28.5C51 27.3954 51.8954 26.5 53 26.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"61",d:"M170.413 35.5078C170.685 35.4599 170.944 35.6414 170.992 35.9131C171.04 36.1849 170.859 36.4452 170.587 36.4932L162.087 37.9932C161.815 38.0411 161.556 37.8586 161.508 37.5869C161.46 37.3151 161.641 37.0558 161.913 37.0078L170.413 35.5078Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"62",d:"M150 26.5C151.105 26.5 152 27.3954 152 28.5C152 29.6046 151.105 30.5 150 30.5C149.068 30.5 148.288 29.8623 148.065 29H132C131.172 29 130.5 29.6718 130.5 30.5V32C130.5 33.3805 129.381 34.5 128 34.5H107.73C107.385 35.0973 106.74 35.5 106 35.5C104.895 35.5 104 34.6046 104 33.5C104 32.3954 104.895 31.5 106 31.5C107.105 31.5 108 32.3954 108 33.5H128C128.828 33.5 129.5 32.8283 129.5 32V30.5C129.5 29.1195 130.619 28 132 28H148.065C148.288 27.1377 149.068 26.5 150 26.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"63",d:"M154 31.5C155.105 31.5 156 32.3954 156 33.5C156 34.6046 155.105 35.5 154 35.5C153.26 35.5 152.615 35.0973 152.27 34.5H142.73C142.385 35.0973 141.74 35.5 141 35.5C139.895 35.5 139 34.6046 139 33.5C139 32.3954 139.895 31.5 141 31.5C142.105 31.5 143 32.3954 143 33.5H152C152 32.3954 152.895 31.5 154 31.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"64",d:"M98 22.5C99.1046 22.5 100 23.3954 100 24.5C100 25.4316 99.3622 26.2113 98.5 26.4336V28.5C98.4998 29.8805 97.3806 31 96 31H73.9346C73.7123 31.8623 72.9317 32.5 72 32.5C70.8954 32.5 70 31.6046 70 30.5C70 29.3954 70.8954 28.5 72 28.5C72.9317 28.5 73.7123 29.1377 73.9346 30H96C96.8283 30 97.4998 29.3283 97.5 28.5V26.4336C96.6378 26.2113 96 25.4316 96 24.5C96 23.3954 96.8954 22.5 98 
22.5Z",fill:"var(--strokeMain)"}),(0,b.jsx)("path",{className:"circuit-path","data-id":"65",d:"M52.1602 6.58301C53.266 5.87612 54.7308 6.14479 55.5137 7.19824L65.3203 20.3955C65.6033 20.7761 66.0502 20.9999 66.5244 21H90.0654C90.2877 20.1377 91.0683 19.5 92 19.5C93.1046 19.5 94 20.3954 94 21.5C94 22.6046 93.1046 23.5 92 23.5C91.0683 23.5 90.2877 22.8623 90.0654 22H66.5244C65.7339 21.9999 64.9902 21.6257 64.5186 20.9912L54.7109 7.79492C54.2412 7.16281 53.3618 7.00155 52.6982 7.42578L24.6436 25.3623C24.8676 25.6854 25 26.077 25 26.5C25 27.6046 24.1046 28.5 23 28.5C21.8954 28.5 21 27.6046 21 26.5C21 25.3954 21.8954 24.5 23 24.5C23.3011 24.5 23.5857 24.5684 23.8418 24.6875L52.1602 6.58301Z",fill:"var(--strokeMain)"})]})})]}),(0,b.jsx)(Ee,{$left:"20",$delay:"0"}),(0,b.jsx)(Ee,{$left:"35",$delay:"1.5"}),(0,b.jsx)(Ee,{$left:"55",$delay:"3"}),(0,b.jsx)(Ee,{$left:"70",$delay:"4.5"}),(0,b.jsx)(Ee,{$left:"85",$delay:"6"}),(0,b.jsx)(Be,{})]});function Me(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const Te=(0,A.default)(c.Flex).attrs((e=>function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Me(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Me(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({flex:"grow",border:{side:"all",color:"inputBorder"},round:.5,padding:[3,4],margin:[0,0,1,0],cursor:e.disabled?"default":"pointer"},e))).withConfig({displayName:"sampleItem__SampleItemContainer",componentId:"sc-1oy7a7o-0"})(["&:hover{border-color:",";background-color:",";}&:hover > span{color:",";}"],(e=>{let{disabled:n}=e;return n?"inputBorder":(0,c.getColor)("primaryAI")}),(e=>{let{disabled:n}=e;return n?"none":(0,c.getColor)("secondaryHighlightAI")}),(e=>{let{disabled:n}=e;return n?(0,c.getColor)("text"):(0,c.getColor)("primaryAI")})),Ie=e=>{let{label:n,setComposerValue:t,disabled:o}=e;const a=(0,i.useCallback)((()=>{o||t(n)}),[n,t,o]);return(0,b.jsx)(Te,{disabled:o,onClick:a,children:(0,b.jsx)(c.Text,{color:"tooltipText",children:n})})},ve=e=>{const n=(e=>e.length?Math.floor(Math.random()*e.length):null)(e);return[n,n===e.length-1?0:n+1]},_e=["How is my infra today?","Why so many alerts today?","What happened last night?","Is this CPU usage normal?","Which server needs attention first?","Anything weird going on?","What's eating all the RAM?","Compare this week vs last week","What's my busiest node?","Any containers acting up?","What should I check first?","Is my database healthy?","Predict any issues coming?","Summarize the last hour"],Qe=(0,i.memo)((e=>{let{canChat:n,setComposerValue:t}=e;const o=(0,i.useMemo)((()=>ve(_e).map((e=>_e[e]))),[]);return(0,b.jsx)(b.Fragment,{children:o.map((e=>(0,b.jsx)(Ie,{label:e,setComposerValue:t,disabled:!n},e)))})}));var De=t(21996);function xe(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function ke(e){for(var n=1;n<arguments.length;n++){var 
t=null!=arguments[n]?arguments[n]:{};n%2?xe(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):xe(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Re=(0,A.default)(c.Flex).attrs((e=>ke({width:"100%",padding:[0,4],background:e.highlight?"panelBg":"none",column:!0,border:{side:"all",color:"inputBorder"},round:.5},e))).withConfig({displayName:"composer__ComposerContainer",componentId:"sc-zovuse-0"})(["",""],(e=>{let{highlight:n}=e;return n?"filter: drop-shadow(0 -8px 15px ".concat((0,c.getColor)("mainBackground"),");"):""})),Se=(0,A.default)(De.A).attrs((e=>ke({background:e.highlight?"panelBg":"none",fontSize:"12px"},e))).withConfig({displayName:"composer__ComposerTextarea",componentId:"sc-zovuse-1"})(["min-height:40px;max-height:200px;overflow-y:auto;resize:none;&&{padding:16px 0;border:none;}"]),Pe=e=>{let{testId:n="chat-composer",value:t,placeholder:o,disabled:a,highlight:s,ToolsComponent:r,containerProps:A={},textareaProps:l={},onSend:d=Y()}=e;const h=(0,i.useRef)(null),[m,g]=(0,i.useState)(t||""),p=(0,i.useCallback)((()=>{m&&(d(m),g(""),h.current&&h.current.focus())}),[m,d,g]),f=(0,i.useCallback)((e=>{const{shiftKey:n,key:t}=e;"Enter"!==t||n||(e.preventDefault(),p())}),[p]);return(0,i.useEffect)((()=>{!a&&h.current&&h.current.focus()}),[a]),(0,b.jsxs)(Re,ke(ke({"data-testid":n,highlight:s},A),{},{children:[(0,b.jsx)(Se,ke({ref:h,name:"chat-composer",value:m,placeholder:o,disabled:a,onChange:g,onKeyDown:f,highlight:s,autoFocus:"autofocus"},l)),(0,b.jsxs)(c.Flex,{position:"relative",alignItems:"baseline",justifyContent:r?"between":"end",padding:[2,0,4,0],children:[r?(0,b.jsx)(r,{value:m,disabled:a}):null,(0,b.jsx)(u.A,{"data-testid":"".concat(n,"-send-button"),label:"",flavour:"hollow",icon:"arrow_left",iconRotate:1,disabled:!m||a,onClick:p})]})]}))};var Fe=t(40531),Ye=t(40573);const Ue=["canChat","isPaid","onClose"];function Ne(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function je(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Ne(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Ne(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const ze=()=>(0,b.jsxs)(c.Flex,{alignItems:"center",gap:2,justifyContent:"center",children:[(0,b.jsx)(c.Icon,{name:"warning_triangle",color:"warning"}),(0,b.jsx)(c.Text,{children:"You don't have permissions to use Netdata AI"})]}),He=e=>{let{canChat:n,isPaid:t,onClose:o}=e,i=(0,r.A)(e,Ue);const{isDemo:a}=(0,Ye.A)();return t?n?null:(0,b.jsx)(ze,je({},i)):(0,b.jsx)(c.Flex,je(je({column:!0,gap:2,alignItems:"center"},i),{},{children:a?(0,b.jsx)(c.Text,{children:"This is a demo space"}):(0,b.jsxs)(b.Fragment,{children:[(0,b.jsx)(c.Text,{children:"Please upgrade to use Netdata AI"}),(0,b.jsx)(Fe.A,{onClick:o})]})}))};var Oe=t(30413),Le=t(16402),Ge=t(25147);const Je=(0,U.A)(c.Flex),qe=e=>{let{selectedItemsCount:n}=e;const t=n>=Ge.C,o=(0,i.useMemo)((()=>t?"You have selected the maximum number of reports":null),[t]);return 
n>0?(0,b.jsxs)(Je,{alignItems:"center",gap:1,tooltip:o,tooltipProps:{align:"bottom"},children:[t?(0,b.jsx)(c.Icon,{name:"warning_triangle",color:"warning"}):null,(0,b.jsxs)(c.Text,{color:"textLite",children:[n," reports selected"]})]}):(0,b.jsx)("div",{})},Ke=e=>{let{height:n,onClose:t}=e;const{activeConversation:o,setAssociatedItemSelectorItems:a,associateConversation:s}=(0,h.A)(),{selectedItems:r=[]}=(null===o||void 0===o?void 0:o.associatedItemSelector)||{},A=0===r.length,l=(0,i.useCallback)((e=>{let{value:n,checked:t}=e;if(null!==o&&void 0!==o&&o.associatedItemSelector){const{selectedItems:e=[]}=o.associatedItemSelector,i=t?e.includes(n)?e:[...e,n]:e.filter((e=>e!==n));a(i)}}),[null===o||void 0===o?void 0:o.associatedItemSelector,a]),d=(0,i.useCallback)((()=>{if(r.length){const e=r.map((e=>({type:"report",data:{id:e}})));s({payload:{subjects:e}}),t()}}),[r,s,t]),m=(0,i.useCallback)((()=>{a([])}),[a]);return(0,b.jsxs)(b.Fragment,{children:[(0,b.jsx)(c.Flex,{id:"chat-composer-tool-report-selector-container",height:{base:"".concat(n,"px"),max:"500px"},column:!0,overflow:{vertical:"auto"},children:(0,b.jsx)(Oe.A,{flavour:Le.BG.chat,onItemSelectionChange:l})}),(0,b.jsxs)(c.Flex,{alignItems:"center",justifyContent:"between",padding:[2],border:{side:"top",color:"border"},children:[(0,b.jsx)(qe,{selectedItemsCount:r.length}),(0,b.jsxs)(c.Flex,{alignItems:"center",gap:2,children:[r.length>0?(0,b.jsx)(u.A,{label:"Clear selection",flavour:"hollow",icon:"",onClick:m}):null,(0,b.jsx)(u.A,{label:"Import reports",icon:"",disabled:A,onClick:d})]})]})]})},Ve={report:{id:"report",label:"Report",icon:"view_list",windowTitle:"Add Report",windowSubtitle:"You can add up to ".concat(Ge.C," reports"),Component:Ke}};function Xe(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const We=(0,A.default)(c.Flex).attrs((e=>function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Xe(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Xe(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({alignItems:"center",gap:2,padding:[2],cursor:"pointer",_hover:{background:"highlight"}},e))).withConfig({displayName:"toolItem__StyledFlex",componentId:"sc-146c0et-0"})([""]),Ze=e=>{let{selector:n,onClick:t=Y()}=e;const o=(0,i.useCallback)((()=>{null!==n&&void 0!==n&&n.id?t(n):console.warn("Chat tools: No tool selector")}),[null===n||void 0===n?void 0:n.id,t]);return(0,b.jsxs)(We,{onClick:o,children:[(0,b.jsx)(c.Icon,{name:null===n||void 0===n?void 0:n.icon,color:"text"}),(0,b.jsx)(c.Text,{children:null===n||void 0===n?void 0:n.label})]})},$e=e=>{let{onItemClick:n}=e;return(0,b.jsx)(b.Fragment,{children:Object.values(Ve).map((e=>(0,b.jsx)(Ze,{selector:e,onClick:n},e.id)))})},en=e=>{let{disabled:n,onClose:t}=e;const{activeConversation:o,setReportMode:a}=(0,h.A)(),s=null===o||void 0===o?void 0:o.reportMode,r=s?"primaryAI":"text",A=(0,i.useCallback)((e=>{let{target:n}=e;a(n.checked),t()}),[a,t]);return(0,b.jsxs)(c.Flex,{alignItems:"center",justifyContent:"between",padding:[2],border:{side:"top",color:"border"},children:[(0,b.jsxs)(c.Flex,{alignItems:"center",gap:2,children:[(0,b.jsx)(c.Icon,{name:"ai",color:r}),(0,b.jsx)(c.Text,{color:r,children:"Generate 
report"})]}),(0,b.jsx)(c.Toggle,{colored:!0,checked:s,disabled:n,onChange:A,toggleProps:{uncheckedColor:["neutral","grey130"],checkedColor:"primaryAI"}})]})},nn=e=>{let{disabled:n,onSelectorClick:t,onClose:o}=e;return(0,b.jsxs)(c.Flex,{column:!0,border:{side:"all",color:"border"},round:!0,children:[(0,b.jsx)(c.Flex,{alignItems:"center",padding:[2],border:{side:"bottom",color:"border"},children:(0,b.jsx)(c.TextSmall,{color:"textLite",children:"Add a subject to your conversation"})}),(0,b.jsx)($e,{onItemClick:t}),(0,b.jsx)(en,{disabled:n,onClose:o})]})},tn=e=>{let{disabled:n,onSelectorClick:t}=e;const o=(0,i.useRef)(),[a,s,,r]=(0,D.A)(!1),A=(0,i.useCallback)((e=>{t(e),r()}),[t,r]);return(0,b.jsxs)(b.Fragment,{children:[(0,b.jsx)(c.Flex,{id:"chat-ai-modal-composer-tools-menu-accessor",ref:o,padding:[2,0,0,0],children:(0,b.jsx)(c.Button,{icon:"plus",flavour:"hollow",neutral:!0,onClick:s,disabled:n})}),o.current&&a?(0,b.jsx)(c.Drop,{width:60,target:o.current,align:{bottom:"top",left:"left"},background:"modalBackground",round:!0,onClickOutside:r,onEsc:r,children:(0,b.jsx)(nn,{disabled:n,onSelectorClick:A,onClose:r})}):null]})},on=(0,U.A)(c.Flex),an=()=>{const{activeConversation:e}=(0,h.A)();return(null===e||void 0===e?void 0:e.reportMode)?(0,b.jsx)(on,{width:"26px",height:"22px",alignItems:"center",justifyContent:"center",round:!0,background:"insightsBlueSemi",tooltip:{title:"Report mode",description:"You are now on report mode. You can trigger background investigation reports from the chat."},tooltipProps:{align:"top"},children:(0,b.jsx)(c.Icon,{name:"ai",color:"primaryAI",height:"12px"})}):null};function sn(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const rn=(0,A.default)(c.Flex).attrs((e=>function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?sn(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):sn(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({column:!0,width:"100%",position:"absolute",background:"panelBg",border:{side:"all",color:"border"},bottom:"45px"},e))).withConfig({displayName:"composerSelector__Container",componentId:"sc-123lnm3-0"})(["box-shadow:0 4px 4px 0 rgba(0,0,0,0.25);"]),An=e=>{let{selector:n,onClose:t}=e;const[o,a]=(0,i.useState)(0);return null!==n&&void 0!==n&&n.Component?((0,i.useEffect)((()=>{a(((e,n)=>{if(!e||!n)return null;const t=document.getElementById(e),o=document.getElementById(n);if(!t||!o)return null;const{top:i}=t.getBoundingClientRect(),{top:a}=o.getBoundingClientRect();return a-i-50})("chat-ai-modal-body-container","chat-ai-modal-composer-tools-menu-accessor"))}),[a]),(0,b.jsxs)(rn,{children:[(0,b.jsxs)(c.Flex,{alignItems:"center",justifyContent:"between",padding:[2,4],border:{side:"bottom",color:"border"},children:[(0,b.jsxs)(c.Flex,{column:!0,gap:1,children:[(0,b.jsx)(c.TextBig,{children:null===n||void 0===n?void 0:n.windowTitle}),null!==n&&void 
0!==n&&n.windowSubtitle?(0,b.jsx)(c.TextSmall,{children:n.windowSubtitle}):null]}),(0,b.jsx)(c.Button,{flavour:"borderless",neutral:!0,icon:"x",onClick:t})]}),(0,b.jsx)(n.Component,{height:o,onClose:t})]})):null},cn=e=>{let{disabled:n}=e;const{activeConversation:t,setAssociatedItemSelector:o}=(0,h.A)(),{associatedItemSelector:a}=t||{},s=(0,i.useCallback)((()=>o(null)),[o]);return(0,b.jsxs)(c.Flex,{alignItems:"baseline",gap:2,children:[(0,b.jsx)(tn,{disabled:n,onSelectorClick:o}),(0,b.jsx)(an,{}),a?(0,b.jsx)(An,{selector:a,onClose:s}):null]})};var ln=t(60908);const dn=e=>{let{canChat:n,isPaid:t,onClose:o}=e;const[a,s]=(0,i.useState)(""),{sendMessage:r}=(0,h.A)();return(0,b.jsx)(ln.Ay,{element:"WelcomeView",children:(0,b.jsx)(c.Flex,{width:"100%",flex:"grow",column:!0,alignItems:"center",justifyContent:"center",children:(0,b.jsxs)(c.Flex,{width:"100%",column:!0,gap:4,alignItems:"center",children:[(0,b.jsx)(Ce,{}),(0,b.jsx)(He,{canChat:n,isPaid:t,onClose:o}),(0,b.jsx)(Pe,{testId:"chat-ai-welcome-view-composer",value:a,placeholder:"Ask anything about your infrastructure, nodes, alerts or metrics...",disabled:!n,onSend:r,ToolsComponent:cn},a),(0,b.jsx)(c.Flex,{"data-testid":"chat-ai-sample-items",flexWrap:!0,width:"100%",alignItems:"center",gap:4,children:(0,b.jsx)(Qe,{canChat:n,setComposerValue:s})})]})})})};t(8872),t(37550);var un=t(73700),hn=t(85660);const mn=["hidden"],gn=["ref","autoScrollToBottom","showScrollToBottomIcon","children","bottomAnchorProps"];function pn(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function fn(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?pn(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):pn(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const yn=(0,A.default)(c.Flex).attrs((e=>fn({position:"sticky",bottom:"0",alignItems:"center",justifyContent:"center"},e))).withConfig({displayName:"thread__ScrollToBottomContainer",componentId:"sc-1w94kdx-0"})(["background:linear-gradient(to bottom,rgba(0,0,0,0),",");",""],(0,c.getRgbColor)("mainBackground",.7),(e=>{let{hidden:n}=e;return n?"opacity: 0; z-index: -100;":""})),bn=(0,A.default)(c.Flex).attrs((e=>fn({alignItems:"center",justifyContent:"center",width:"36px",height:"36px",round:"50%",background:"inputBg",border:{side:"all",color:"inputBorder"},_hover:{border:"inputBorderHover"},cursor:"pointer"},e))).withConfig({displayName:"thread__ScrollToBottomIconContainer",componentId:"sc-1w94kdx-1"})(["filter:drop-shadow(0 8px 10px ",");&:hover{> svg{fill:",";}}"],(0,c.getColor)("mainBackground"),(0,c.getColor)("text")),En=(0,A.default)(c.Icon).attrs((e=>fn({name:"arrow_left",color:"textLite",rotate:3},e))).withConfig({displayName:"thread__ScrollToBottomIcon",componentId:"sc-1w94kdx-2"})([""]),wn=e=>{let{hidden:n}=e,t=(0,r.A)(e,mn);return(0,b.jsx)(yn,{hidden:n,children:(0,b.jsx)(bn,fn(fn({},t),{},{children:(0,b.jsx)(En,{})}))})},Bn=e=>{let{ref:n,autoScrollToBottom:t,showScrollToBottomIcon:o=!0,children:a,bottomAnchorProps:s={}}=e,A=(0,r.A)(e,gn);const l=(0,i.useRef)(null),d=(0,i.useRef)(null),[u,h]=(0,i.useState)(!1),m=(0,i.useCallback)((()=>{d.current&&d.current.scrollIntoView({behavior:"smooth"})}),[]),g=(0,i.useCallback)((0,un.n)(100,(()=>{if(o&&l.current){const 
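/* Annotation: scroll tracking for the thread list, debounced at 100ms via
   un.n. The floating scroll-to-bottom icon is shown once the viewport sits
   more than 10px above the end of the scrollable content; a readable sketch
   of the check below (names illustrative, not the minifier's):
     const distanceFromBottom = scrollHeight - scrollTop - clientHeight;
     setIconVisible(distanceFromBottom > 10);
   The same check re-runs on resize (hn.A) and whenever children change. */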
e=l.current.scrollTop,n=l.current.scrollHeight,t=l.current.clientHeight;h(n-e-t>10)}})),[o,h]),[p]=(0,hn.A)({callback:g});return(0,i.useEffect)((()=>{t&&m()}),[t,a,m]),(0,i.useEffect)((()=>{g()}),[a,g]),(0,b.jsxs)(c.Flex,fn(fn({"data-testid":"chat-thread-container",ref:(0,c.mergeRefs)(l,p,n),position:"relative",height:"100%",flex:"grow",column:!0,gap:4,padding:[0,2,0,0],overflow:{vertical:"auto"},onScroll:g},A),{},{children:[a,(0,b.jsx)(wn,{hidden:!u,onClick:m}),(0,b.jsx)("div",fn({ref:d},s))]}))},Cn=["children"];function Mn(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Tn(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Mn(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Mn(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const In=e=>{let{children:n}=e,t=(0,r.A)(e,Cn);return(0,b.jsx)(c.Flex,Tn(Tn({"data-testid":"chat-agent-message-container",padding:[6,0]},t),{},{children:n}))};var vn=t(6084),_n=t(46587);const Qn=["children"];function Dn(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function xn(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Dn(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Dn(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const kn=(0,A.default)(c.Box).withConfig({displayName:"userMessage__Container",componentId:"sc-1fns94u-0"})(["display:grid;grid-template-columns:auto 32px;gap:8px;"]),Rn=e=>{let{children:n}=e,t=(0,r.A)(e,Qn);const o=(0,_n.uW)("avatarURL");return(0,b.jsxs)(kn,xn(xn({"data-testid":"chat-user-message-container"},t),{},{children:[(0,b.jsx)(c.Flex,{justifyContent:"end",children:(0,b.jsx)(c.Flex,{alignSelf:"start",padding:[2,4],border:{side:"all",color:"border"},round:!0,children:n})}),(0,b.jsx)(vn.A,{src:o||""})]}))};var Sn=t(3941);const Pn=()=>{const[e,n]=(0,i.useState)(0);return(0,Sn.A)((0,i.useCallback)((()=>{n((e=>3===e?0:e+1))}),[]),250),(0,b.jsx)(c.TextBig,{children:Array(e).fill(".").join("")})},Fn=e=>{let{label:n="Thinking",onStop:t}=e;const o="function"===typeof t;return(0,b.jsxs)(c.Flex,{alignItems:"center",gap:2,children:[o?(0,b.jsx)(c.Flex,{children:(0,b.jsx)(c.Button,{neutral:!0,flavour:"hollow",icon:"pauseSolid",iconColor:"offline",onClick:t})}):null,(0,b.jsxs)(c.TextBig,{color:"tooltipText",children:[n,(0,b.jsx)(Pn,{})]})]})};t(72577);var Yn=t(54852),Un=t(32277);const Nn=e=>{let{children:n}=e;return(0,b.jsx)(Yn.A,{transformConfiguration:{nodes:Un.A},children:n})},jn=e=>{let{role:n,text:t}=e;return"assistant"===n?(0,b.jsx)(Nn,{children:t}):"user"===n?(0,b.jsx)(c.TextBig,{lineHeight:1.5,children:t}):null};t(33110);function zn(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Hn(e){for(var n=1;n<arguments.length;n++){var 
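/*
  Bn above is a "stick to bottom" chat thread: a debounced scroll handler (the
  (0,un.n)(100, ...) call appears to be a 100 ms debounce) measures how far the
  viewport sits above the bottom and shows a floating "jump to latest" button when
  the gap exceeds 10px, while an empty anchor div after the messages is
  smooth-scrolled into view whenever new content arrives and auto-scroll is on.
  A simplified hook capturing the same mechanics (illustrative names):
*/
import { useCallback, useEffect, useRef, useState } from "react";

const debounce = (ms, fn) => {
  let timer;
  return (...args) => { clearTimeout(timer); timer = setTimeout(() => fn(...args), ms); };
};

function useStickToBottom(autoScroll, content) {
  const containerRef = useRef(null);
  const anchorRef = useRef(null);
  const [awayFromBottom, setAwayFromBottom] = useState(false);

  const scrollToBottom = useCallback(() => {
    if (anchorRef.current) anchorRef.current.scrollIntoView({ behavior: "smooth" });
  }, []);

  // debounced so rapid scroll events only trigger one measurement per 100 ms
  const onScroll = useCallback(debounce(100, () => {
    const el = containerRef.current;
    if (!el) return;
    setAwayFromBottom(el.scrollHeight - el.scrollTop - el.clientHeight > 10);
  }), []);

  useEffect(() => { if (autoScroll) scrollToBottom(); }, [autoScroll, content, scrollToBottom]);

  return { containerRef, anchorRef, awayFromBottom, onScroll, scrollToBottom };
}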
t=null!=arguments[n]?arguments[n]:{};n%2?zn(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):zn(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const On=e=>{let{label:n,isCollapsible:t,contentContainerProps:o={},children:i}=e;const[a,s]=(0,D.A)();return(0,b.jsxs)(c.Flex,{column:!0,gap:1,children:[(0,b.jsxs)(c.Flex,{alignItems:"center",gap:2,children:[(0,b.jsx)(c.Text,{color:"textLite",children:n}),t?(0,b.jsx)(c.Icon,{name:"chevron_left",size:"small",color:"textLite",rotate:a?1:3,onClick:s,cursor:"pointer"}):null]}),a||!t?(0,b.jsx)(c.Flex,Hn(Hn({height:{min:"40px",max:"300px"},overflow:{vertical:"auto"}},o),{},{children:i})):null]})};var Ln=t(89975),Gn=t(23392),Jn=t(37528),qn=t(55164);const Kn=["after","before"];function Vn(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Xn(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Vn(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Vn(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Wn=e=>{try{return new Date(e)}catch(n){return null}},Zn=e=>e||{},$n={custom_bar_chart_block:"custom_bar_chart_block",custom_bubble_chart_block:"custom_bubble_chart_block",custom_timeseries_chart_block:"custom_timeseries_chart_block",load_chart_block:"load_chart_block",load_systemd_journal_block:"load_systemd_journal_block",load_windows_events_block:"load_windows_events_block"},et={[$n.custom_bar_chart_block]:e=>{if(!e)return{};const{datasets:n=[]}=e||{};return Xn(Xn({},e),{},{chartType:"bar",datasets:n.map((e=>Xn(Xn({},e),{},{data:e.data.map((n=>Xn(Xn({},n),!n.color&&e.color?{color:e.color}:{})))})))})},[$n.custom_bubble_chart_block]:e=>e?Xn(Xn({},e),{},{chartType:"bubble"}):{},[$n.custom_timeseries_chart_block]:e=>e?Xn(Xn({},e),{},{chartType:"line"}):{},[$n.load_chart_block]:e=>{if(!e)return{};const{after:n,before:t}=e;return Xn(Xn({},(0,r.A)(e,Kn)),{},{after:Wn(n),before:Wn(t)})},[$n.load_systemd_journal_block]:Zn,[$n.load_windows_events_block]:Zn,default:Y()};function nt(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function tt(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?nt(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):nt(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const ot=e=>{try{return JSON.stringify("string"===typeof e?JSON.parse(e):e,null,2)}catch(n){return String(e)}},it=A.default.pre.withConfig({displayName:"details__StyledPre",componentId:"sc-ae93ia-0"})(["padding:8px 16px;border-radius:4px;> code{font-size:12px;}"]),at=()=>(0,b.jsxs)(c.Flex,{gap:1,padding:[4,0],flex:"grow",background:"modalBackground",alignItems:"center",justifyContent:"center",children:[(0,b.jsx)(c.Icon,{name:"warning_triangle",color:"warning"}),(0,b.jsx)(c.Text,{children:"Something went wrong 
while loading this chart."})]}),st={[$n.custom_bar_chart_block]:Ln.A,[$n.custom_bubble_chart_block]:Ln.A,[$n.custom_timeseries_chart_block]:Ln.A,[$n.load_chart_block]:Gn.A,[$n.load_systemd_journal_block]:Jn.A,[$n.load_windows_events_block]:Jn.A,default:null},rt=e=>{let{name:n,input:t,toolResult:o={}}=e;const i=et[n]||et.default,a=(e=>{try{return"string"===typeof e?JSON.parse(e):e}catch(n){return{}}})(t),s=i(a),r=st[n]||st.default,A=(e=>n=>(0,b.jsx)(c.Flex,{"data-testid":"chat-ai-tool-result-container",column:!0,height:{max:"500px"},overflow:{vertical:"auto"},children:(0,b.jsx)(e,tt({},n))}))(r);return Object.keys($n).includes(n)&&r?(0,b.jsx)(A,{data:s}):(0,b.jsx)(On,{label:"Result",contentContainerProps:{height:{min:"100px",max:"500px"}},children:(0,b.jsx)(Nn,{children:null===o||void 0===o?void 0:o.text})})},At=(0,i.memo)((e=>{let{name:n,input:t,toolResult:o={}}=e;return(0,b.jsxs)(c.Flex,{column:!0,gap:4,children:[(0,b.jsx)(On,{label:"Input",isCollapsible:!0,contentContainerProps:{background:"panelBg"},children:(0,b.jsx)(it,{children:(0,b.jsx)("code",{children:ot(t)})})}),(0,b.jsx)(qn.Ay,{fallback:at,children:(0,b.jsx)(rt,{name:n,input:t,toolResult:o})})]})}));function ct(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function lt(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?ct(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):ct(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const dt=(0,A.default)(c.Icon).attrs((e=>lt({name:"refresh",color:"textLite"},e))).withConfig({displayName:"toolUseRenderer__Loader",componentId:"sc-1hq0g23-0"})(["animation:1s rotation infinite linear;@keyframes rotation{from{transform:rotate(0deg);}to{transform:rotate(360deg);}}"]),ut=e=>{let{name:n,input:t,toolResult:o={}}=e;const i=Object.keys($n).includes(n),[a,s]=(0,D.A)(i);return(0,b.jsxs)(c.Flex,lt(lt({},a?{width:"100%"}:{}),{},{column:!0,gap:2,padding:[2,4],border:{side:"all",color:"border"},round:!0,children:[(0,b.jsxs)(c.Flex,{alignItems:"center",gap:2,children:[(0,b.jsx)(c.TextBig,{color:"textLite",children:n}),null!==o&&void 0!==o&&o.text?(0,b.jsx)(c.Icon,{name:"chevron_left",size:"small",color:"textLite",rotate:a?1:3,onClick:s,cursor:"pointer"}):(0,b.jsx)(dt,{})]}),a?(0,b.jsx)(At,{name:n,input:t,toolResult:o}):null]}))},ht=(0,i.memo)((e=>{let{conversationId:n,error:t}=e;const o=(0,V.A)({showLoading:!0}),a=(0,i.useCallback)((()=>{o(n)}),[n,o]);return(0,b.jsxs)(c.Flex,{column:!0,padding:[2,4],children:[(0,b.jsx)(c.TextBig,{color:"error",children:t}),n?(0,b.jsx)(c.Button,{flavour:"borderless",label:"Reload conversation",icon:"refresh",neutral:!0,onClick:a}):null]})})),mt=e=>{let{message:n}=e;const{activeConversation:t,setReportMode:o}=(0,h.A)(),{content:a=[],role:s}=n||{};(0,i.useEffect)((()=>{var e;if(null===t||void 0===t||!t.id||"assistant"!==s)return;const n=null===(e=a.find((e=>"tool_use"===e.type&&"trigger_report"===e.name)))||void 0===e?void 0:e.id,i=n?a.find((e=>"tool_result"===e.type&&e.id===n)):null;if(i)try{const{status:e}=JSON.parse(i.text);"success"===e&&o(!1)}catch(r){}}),[null===t||void 0===t?void 0:t.id,a,s,o])};function gt(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var 
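/*
  rt above resolves each tool block through two registries keyed by block name:
  et maps a name to an input transformer (tagging chartType for the custom chart
  blocks, converting after/before into Date objects for load_chart_block), st maps
  it to a renderer component, and both fall back to a default entry when the name
  is unknown. The input is defensively JSON-parsed. A sketch of the pattern
  (ChartRenderer-style components and toDate are stand-ins, not bundle exports):
*/
const toDate = (value) => { try { return new Date(value); } catch (e) { return null; } };

const transformers = {
  custom_bar_chart_block: (input) => ({ ...input, chartType: "bar" }),
  custom_timeseries_chart_block: (input) => ({ ...input, chartType: "line" }),
  load_chart_block: ({ after, before, ...rest }) => ({ ...rest, after: toDate(after), before: toDate(before) }),
  default: (input) => input, // identity fallback
};

function resolveToolBlock(name, rawInput, renderers) {
  const parse = (v) => { try { return typeof v === "string" ? JSON.parse(v) : v; } catch (e) { return {}; } };
  const transform = transformers[name] || transformers.default;
  const Renderer = renderers[name] || renderers.default;
  return Renderer ? { Renderer, data: transform(parse(rawInput)) } : null;
}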
o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function pt(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?gt(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):gt(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const ft=e=>{let{conversationId:n,message:t}=e;const{content:o=[],role:i}=t||{};return mt({message:t}),(0,b.jsx)(c.Flex,{"data-testid":"chat-ai-message-renderer",column:!0,gap:2,alignItems:"start",children:o.map((e=>{if("text"===e.type)return(0,b.jsx)(jn,pt({role:i},e),e.text);if("tool_use"===e.type){const n=o.find((n=>"tool_result"===n.type&&n.id===e.id));return(0,b.jsx)(ut,pt({role:i,toolResult:n},e),e.id)}return"error"===e.type?(0,b.jsx)(ht,pt({role:i,conversationId:n},e),e.error):null}))})};var yt=t(24155);const bt=()=>(0,b.jsxs)(c.Flex,{column:!0,gap:4,children:[(0,b.jsx)(c.Flex,{justifyContent:"end",children:(0,b.jsx)(yt.A,{height:"40px",width:"170px"})}),(0,b.jsxs)(c.Flex,{column:!0,gap:2,children:[(0,b.jsx)(yt.A,{height:"20px",width:"60%"}),(0,b.jsx)(yt.A,{height:"20px",width:"80%"}),(0,b.jsx)(yt.A,{height:"20px",width:"70%"}),(0,b.jsx)(yt.A,{height:"20px",width:"90%"}),(0,b.jsx)(yt.A,{height:"20px",width:"50%"})]}),(0,b.jsx)(c.Flex,{justifyContent:"end",children:(0,b.jsx)(yt.A,{height:"40px",width:"170px"})}),(0,b.jsxs)(c.Flex,{column:!0,gap:2,children:[(0,b.jsx)(yt.A,{height:"20px",width:"80%"}),(0,b.jsx)(yt.A,{height:"20px",width:"60%"}),(0,b.jsx)(yt.A,{height:"20px",width:"90%"}),(0,b.jsx)(yt.A,{height:"20px",width:"50%"}),(0,b.jsx)(yt.A,{height:"20px",width:"70%"})]})]});var Et=t(18121);const wt=()=>{const{activeConversation:e,sendMessage:n,stopProcessing:t}=(0,h.A)(),{id:o,loading:i,isThinking:a,thinkingLabel:s,isParsing:r,messages:A=[]}=e||{},[c,l]=(0,Et.useHovered)({},[]);return i?(0,b.jsx)(bt,{}):(0,b.jsxs)(ln.Ay,{element:"ChatView",children:[(0,b.jsxs)(Bn,{ref:c,"data-testid":"chat-ai-chat-view-thread",autoScrollToBottom:!l&&(a||r),height:"100px",bottomAnchorProps:{id:Ge.s},children:[A.reduce(((e,n)=>{var t;if(e.hasError||null===n||void 0===n||!n.id)return e;const i=null===n||void 0===n||null===(t=n.content)||void 0===t?void 0:t.some((e=>"error"===e.type)),a="assistant"===(null===n||void 0===n?void 0:n.role)?In:"user"===(null===n||void 0===n?void 0:n.role)?Rn:null;return a&&e.elements.push((0,b.jsx)(a,{children:(0,b.jsx)(ft,{conversationId:o,message:n})},n.id)),i&&(e.hasError=!0),e}),{elements:[],hasError:!1}).elements,a?(0,b.jsx)(Fn,{label:s,onStop:t}):r?(0,b.jsx)(Fn,{onStop:t}):null]}),(0,b.jsx)(Pe,{testId:"chat-ai-chat-view-composer",highlight:!0,onSend:n,disabled:i||a||r,ToolsComponent:cn})]})};var Bt=t(22426),Ct=t(32052);function Mt(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Tt(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Mt(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Mt(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const It=e=>{let{id:n,loaded:t,onSuccess:o}=e;const 
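/*
  ft above renders a message's content array by type: plain text goes through the
  markdown-ish renderer, while each tool_use entry is paired with its tool_result
  by shared id before rendering; wt's reducer further stops appending messages once
  one of them carries an error block. The pairing and cut-off logic, extracted:
*/
function pairToolBlocks(content = []) {
  return content
    .filter((block) => block.type === "tool_use")
    .map((block) => ({
      use: block,
      result: content.find((b) => b.type === "tool_result" && b.id === block.id) || null,
    }));
}

function visibleMessages(messages = []) {
  const out = [];
  for (const msg of messages) {
    if (!msg || !msg.id) continue; // skip placeholders without an id
    out.push(msg);
    // render the errored message itself, then stop, like the reduce above
    if ((msg.content || []).some((b) => b.type === "error")) break;
  }
  return out;
}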
a=(0,d.vt)(),s=(0,T.ID)(),{setActiveConversation:r}=(0,Bt.i)(),A=(0,i.useCallback)((e=>{r(Tt(Tt({},e),{},{loading:!1,loaded:!0})),"function"===typeof o&&o()}),[r,o]),c=(0,i.useCallback)((()=>{}),[]);(0,Ct.A)((()=>({enabled:!!n&&!t,fetch:()=>(r((e=>Tt(Tt({},e),{},{loading:!0}))),(0,I.ik)({spaceId:a,roomId:s,id:n})),onSuccess:A,onFail:c})),[a,s,n,t])},vt=()=>{setTimeout((()=>{const e=document.getElementById(Ge.s);e&&e.scrollIntoView({behavior:"smooth"})}),300)},_t=e=>{let{id:n,loaded:t}=e;return It({id:n,loaded:t,onSuccess:vt}),null},Qt=e=>{let{canChat:n,isPaid:t,onClose:o}=e;const{activeConversation:i}=(0,h.A)(),{id:a,title:s,loaded:r,metadata:A,subjects:l}=i||{};return(0,b.jsxs)(b.Fragment,{children:[n&&t?(0,b.jsx)(_t,{id:a,loaded:r}):null,(0,b.jsx)(c.Flex,{id:"chat-ai-modal-body-container","data-testid":"chat-ai-modal-body-container",width:"100%",flex:"grow",justifyContent:"center",children:(0,b.jsx)(c.Flex,{"data-testid":"chat-ai-modal-chat-container",position:"relative",width:{base:"100%",max:"860px"},flex:"grow",column:!0,gap:2,children:s?(0,b.jsxs)(b.Fragment,{children:[(0,b.jsx)(he,{id:a,title:s,metadata:A,subjects:l,canChat:n}),(0,b.jsx)(wt,{})]}):(0,b.jsx)(dn,{canChat:n,isPaid:t,onClose:o})})})]})},Dt=e=>{let{conversations:n=[]}=e;const t=n.length||0;return(0,b.jsxs)(c.Flex,{column:!0,gap:1,children:[(0,b.jsx)(c.TextBig,{color:"primaryAI",children:"Your chats"}),(0,b.jsxs)(c.Text,{color:"textLite",children:[t," conversation",1===t?"":"s"]})]})},xt=e=>{let{value:n,onChange:t}=e;return(0,b.jsx)(c.SearchInput,{value:n,placeholder:"Search conversations",onChange:t})};var kt=t(51220),Rt=t(42561),St=t(64587);const Pt=e=>{let{id:n,title:t}=e;const{setActiveConversation:o}=(0,Bt.i)();return(0,i.useCallback)((()=>{o({id:n,title:t})}),[n,t,o])};function Ft(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const Yt=(0,A.default)(c.Flex).attrs((e=>function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Ft(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Ft(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({alignItems:"center",justifyContent:"between",background:(0,c.getRgbColor)("panelBg",.3),padding:[2,3],border:{side:"all",color:"border"},round:!0},e))).withConfig({displayName:"conversationItem__ConversationItemContainer",componentId:"sc-14b5eaz-0"})(["&:hover{background-color:",";cursor:pointer;}"],(0,c.getRgbColor)("panelBg",.9)),Ut=e=>{let{id:n,title:t,createdAt:o,updatedAt:a,canChat:s,onTabChange:r}=e;const{localeDateString:A,localeTimeString:l}=(0,St.$j)(),d=a||o,u=d?"".concat(A(new Date(d))," ").concat(l(new Date(d),{secs:!1})):null,h=Pt({id:n,title:t}),{sendLog:g}=(0,_.A)(),p=(0,i.useCallback)((e=>{(0,Rt.A)(e.target).some((e=>{var n,t;return(null===(n=e.classList)||void 0===n?void 0:n.contains("btn-conversation-item-delete"))||(null===(t=e.classList)||void 0===t?void 0:t.contains("conversation-item-delete-modal"))}))||s&&(h(),r(m),g({element:"ConversationItem",description:"Conversation item 
clicked",conversationId:n,conversationTitle:t},!0))}),[s,h,r,g]);return(0,b.jsxs)(Yt,{"data-testid":"chat-ai-conversations-list-item",onClick:p,children:[(0,b.jsxs)(c.Flex,{column:!0,gap:1,children:[(0,b.jsx)(c.TextBig,{children:t||n}),u&&(0,b.jsx)(c.Text,{color:"textLite",children:u})]}),(0,b.jsx)(c.Flex,{alignItems:"center",children:(0,b.jsx)(P,{id:n,title:t,disabled:!s})})]})};function Nt(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function jt(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Nt(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Nt(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const zt=e=>{let{conversations:n=[],canChat:t,onTabChange:o,height:a="300px",rowHeight:s=54}=e;const r=(0,i.useRef)(),A=(0,i.useCallback)((()=>s),[s]),c=(0,kt.Te)({count:n.length,getScrollElement:()=>r.current,enableSmoothScroll:!1,estimateSize:A,gap:8});return(0,b.jsx)("div",{ref:r,style:{height:a,overflow:"auto",padding:"0 4px",margin:"0 -4px"},children:(0,b.jsx)("div",{style:{minHeight:"".concat(c.getTotalSize(),"px"),width:"100%",position:"relative"},children:c.getVirtualItems().map((e=>(0,b.jsx)("div",{style:{position:"absolute",top:0,left:0,width:"100%",transform:"translateY(".concat(e.start,"px)"),overflow:"hidden"},ref:c.measureElement,"data-index":e.index,children:(0,b.jsx)(Ut,jt(jt({},n[e.index]),{},{canChat:t,onTabChange:o}))},e.key)))})})},Ht=()=>(0,b.jsxs)(c.Flex,{column:!0,gap:2,children:[(0,b.jsx)(yt.A,{}),(0,b.jsx)(yt.A,{}),(0,b.jsx)(yt.A,{height:"54px"}),(0,b.jsx)(yt.A,{height:"54px"}),(0,b.jsx)(yt.A,{height:"54px"})]});var Ot=t(47090);const Lt=e=>{let{error:n}=e;const t=(0,Ot.o)(null===n||void 0===n?void 0:n.errorMsgKey,null)||(null===n||void 0===n?void 0:n.errorMessage)||(null===n||void 0===n?void 0:n.message)||"Something went wrong";return(0,b.jsx)(c.Flex,{height:"100%",alignItems:"center",justifyContent:"center",children:(0,b.jsxs)(c.Flex,{column:!0,gap:2,children:[(0,b.jsx)(c.TextBig,{textAlign:"center",children:"Error"}),(0,b.jsx)(c.Text,{children:t})]})})},Gt=e=>{let{canChat:n,isPaid:t,onTabChange:o,onClose:a}=e;const s=(0,i.useRef)(),r=(0,i.useRef)(),[A,l]=(0,i.useState)(),{loaded:d,conversations:u,searchTerm:h,onSearchChange:m,error:g}=(0,M.A)();return(0,i.useEffect)((()=>{if(s.current&&r.current){var e,n;const t=null===(e=s.current.getBoundingClientRect())||void 0===e?void 0:e.height,o=null===(n=r.current.getBoundingClientRect())||void 0===n?void 0:n.height;l(t-o-12)}}),[s.current,r.current]),d?g?(0,b.jsx)(Lt,{error:g}):(0,b.jsx)(ln.Ay,{element:"ConversationsList",children:(0,b.jsxs)(c.Flex,{ref:s,"data-testid":"chat-ai-conversations-list-container",height:"100%",column:!0,gap:2,overflow:"hidden",children:[(0,b.jsxs)(c.Flex,{ref:r,column:!0,gap:2,children:[(0,b.jsx)(Dt,{conversations:u}),(0,b.jsx)(xt,{value:h,onChange:m})]}),(0,b.jsx)(He,{canChat:n,isPaid:t,onClose:a,margin:[4,0,0,0]}),(0,b.jsx)(zt,{conversations:u,canChat:n,onTabChange:o,height:"".concat(A,"px")})]})}):(0,b.jsx)(Ht,{})},Jt=["activeTab"];function qt(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return 
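/*
  zt above windows the conversation list instead of rendering every row: a
  virtualizer is given the item count, the scroll element, a fixed size estimate
  per row and a gap, and in return exposes the total scrollable height plus only
  the currently visible items, each absolutely positioned via translateY of its
  start offset. The call shape matches @tanstack/react-virtual's useVirtualizer;
  treat that identification as an assumption. A sketch:
*/
import React, { useRef } from "react";
import { useVirtualizer } from "@tanstack/react-virtual";

function VirtualList({ items, rowHeight = 54, height = "300px", renderRow }) {
  const scrollRef = useRef(null);
  const virtualizer = useVirtualizer({
    count: items.length,
    getScrollElement: () => scrollRef.current,
    estimateSize: () => rowHeight,
    gap: 8,
  });
  return (
    <div ref={scrollRef} style={{ height, overflow: "auto" }}>
      {/* spacer establishes the full scroll height; rows are painted on top */}
      <div style={{ minHeight: `${virtualizer.getTotalSize()}px`, position: "relative" }}>
        {virtualizer.getVirtualItems().map((item) => (
          <div
            key={item.key}
            ref={virtualizer.measureElement}
            data-index={item.index}
            style={{ position: "absolute", top: 0, left: 0, width: "100%",
                     transform: `translateY(${item.start}px)` }}
          >
            {renderRow(items[item.index], item.index)}
          </div>
        ))}
      </div>
    </div>
  );
}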
Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const Kt={[m]:Qt,[g]:Gt,[p]:()=>{}},Vt=e=>{let{activeTab:n}=e,t=(0,r.A)(e,Jt);const a=(0,i.useMemo)((()=>Kt[n]),[n]);return(0,b.jsx)(a,function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?qt(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):qt(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({},t))};var Xt=t(62718);const Wt=()=>(0,b.jsx)(c.Flex,{flex:"grow",alignItems:"center",justifyContent:"center",children:(0,b.jsxs)(c.Flex,{width:"100%",column:!0,gap:4,children:[(0,b.jsx)(c.Flex,{justifyContent:"center",children:(0,b.jsx)(Xt.Bp,{size:"96px"})}),(0,b.jsx)(yt.A,{height:"112px"}),(0,b.jsxs)(c.Flex,{alignItems:"center",justifyContent:"between",children:[(0,b.jsx)(yt.A,{width:"48%",height:"42px"}),(0,b.jsx)(yt.A,{width:"48%",height:"42px"})]})]})});var Zt=t(49676),$t=t(7066),eo=t(99728),no=t(76375),to=t(10444);const oo=["activeTab","expanded","disableDrag","onExpandClick","onTabChange","onClose"];function io(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function ao(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?io(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):io(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const so=(0,A.default)(c.Flex).attrs((e=>ao({position:"fixed",width:"1px",height:"1px",top:"".concat(72,"px"),left:"".concat(60,"px"),zIndex:50},e))).withConfig({displayName:"chatModal__FixedContainer",componentId:"sc-icxbs-0"})([""]),ro=(0,A.default)(Zt.A).withConfig({displayName:"chatModal__ChatModalContainer",componentId:"sc-icxbs-1"})(["display:flex;flex-direction:column;height:80vh;width:",";min-width:350px;min-height:350px;gap:16px;padding:0 16px 16px 16px;border:1px solid ",";border-radius:2px;z-index:50;background:",";backdrop-filter:blur(40px);box-shadow:0 4px 16px rgba(0,0,0,0.12);"],(e=>{let{expanded:n}=e;return n?"calc(100vw - ".concat(120,"px)"):"587px"}),(0,c.getColor)("border"),(0,c.getRgbColor)("mainBackground",.7)),Ao=e=>{let{children:n,disabled:t,position:o}=e;const{attributes:i,listeners:a,setNodeRef:s,transform:r}=(0,l.PM)({id:"assistant-modal",disabled:t}),A=o.x+((null===r||void 0===r?void 0:r.x)||0),c=o.y+((null===r||void 0===r?void 0:r.y)||0),d={transform:"translate3d(".concat(A,"px, ").concat(c,"px, 0)"),touchAction:"none",zIndex:10};return(0,b.jsx)("div",{ref:s,style:d,children:"function"===typeof n?n({listeners:a,attributes:i}):n})},co=e=>{let{activeTab:n,expanded:t,disableDrag:o,onExpandClick:a,onTabChange:s,onClose:A}=e,c=(0,r.A)(e,oo);const 
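/*
  Ao above makes the chat modal draggable: while a drag is in flight the live
  transform from the drag hook is added to the modal's resting position, and on
  drop (see the onDragEnd handler in co just below) the delta is folded into
  state so the modal stays where it was dropped. The hook/provider shapes match
  @dnd-kit/core's useDraggable and DndContext; treat that mapping as an
  assumption. A sketch (the bundle hands listeners/attributes to a header render
  prop instead of spreading them on the wrapper):
*/
import React, { useState } from "react";
import { DndContext, useDraggable } from "@dnd-kit/core";

function DraggableBox({ position, children }) {
  const { attributes, listeners, setNodeRef, transform } = useDraggable({ id: "draggable-box" });
  const x = position.x + ((transform && transform.x) || 0);
  const y = position.y + ((transform && transform.y) || 0);
  return (
    <div
      ref={setNodeRef}
      style={{ transform: `translate3d(${x}px, ${y}px, 0)`, touchAction: "none" }}
      {...listeners}
      {...attributes}
    >
      {children}
    </div>
  );
}

function DraggableModal({ children }) {
  const [position, setPosition] = useState({ x: 0, y: 0 });
  return (
    <DndContext onDragEnd={({ delta }) => setPosition((p) => ({ x: p.x + delta.x, y: p.y + delta.y }))}>
      <DraggableBox position={position}>{children}</DraggableBox>
    </DndContext>
  );
}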
u=(0,i.useRef)(null),h=(0,i.useRef)(null),m="true"===t,g=(0,d.vt)(),{loaded:p,isPaid:f}=(0,no.A)(),y=!!g&&!!p,E=(0,eo.JT)("insights:CreateReport"),[w,B]=(0,i.useState)({x:0,y:0}),M=(0,to.J)();return(0,i.useEffect)((()=>{h.current&&(h.current.style.removeProperty("width"),h.current.style.removeProperty("height")),u.current&&u.current.style.removeProperty("transform"),B({x:0,y:0})}),[m,B]),(0,b.jsx)(ln.Ay,{feature:"NetdataAI",element:"ChatModal",children:(0,b.jsx)(so,{"data-testid":"chat-ai-fixed-container",children:(0,b.jsx)(l.Mp,{onDragEnd:e=>{let{delta:n}=e;B((e=>({x:e.x+n.x,y:e.y+n.y})))},children:(0,b.jsx)(Ao,{ref:u,disabled:o||M,position:w,children:e=>{let{listeners:t,attributes:o}=e;return(0,b.jsxs)(ro,ao(ao({ref:h,"data-testid":"chat-ai-modal-container",expanded:m},c),{},{children:[y?(0,b.jsxs)(b.Fragment,{children:[(0,b.jsx)(C,{listeners:t,attributes:o,activeTab:n,expanded:m,onExpandClick:a,onTabChange:s,onClose:A}),(0,b.jsx)(Vt,{canChat:E,isPaid:f,activeTab:n,onTabChange:s,onClose:A})," "]}):(0,b.jsx)(Wt,{}),(0,b.jsx)($t.A,{})]}))}})})})})};function lo(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function uo(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?lo(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):lo(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const ho=()=>{const{isModalOpen:e,currentModalTab:n,handleChangeModalTab:t,handleOpenModal:o,handleCloseModal:r,params:A}=(0,s.A)("aiChatModal"),{reset:c}=(0,Bt.i)(),l=!(0,d.dg)({defaultValue:!0}),{expanded:u}=A||{},h=(0,i.useCallback)((()=>{o(n||y,uo(uo({},A||{}),{},{expanded:"true"===(null===A||void 0===A?void 0:A.expanded)?"false":"true"}))}),[n,o,A]),m=(0,i.useCallback)((()=>{r(),c()}),[r,c]);(0,a.A)(m);const g=(0,i.useMemo)((()=>(0,b.jsx)(co,{activeTab:n||y,expanded:u,onExpandClick:h,onTabChange:t,onClose:m})),[u,h,m]);return{accessorEnabled:l,isOpen:e,open:o,close:m,modal:g}}},75946(e,n,t){"use strict";t.d(n,{A:()=>d});var o=t(64467),i=(t(98992),t(54520),t(3949),t(8872),t(96540)),a=t(24609),s=t(19186),r=t(22426),A=t(69306);function c(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function l(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?c(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):c(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const d=function(){let{props:e=[],showLoading:n}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const t=(0,a.vt)(),o=(0,s.ID)(),{setActiveConversation:c}=(0,r.i)();return(0,i.useCallback)((i=>{i&&(n&&c((e=>l(l({},e),{},{loading:!0}))),(0,A.ik)({spaceId:t,roomId:o,id:i}).then((t=>{let{data:o}=t;if(o)if(e.length){const n=e.reduce(((e,n)=>l(l({},e),{},{[n]:o[n]})),{});c((e=>l(l(l({},e),n),{},{loading:!1,loaded:!0})))}else c(l(l({},o),{},{loading:!1,loaded:!0}));else n&&c((e=>l(l({},e),{},{loading:!1,loaded:!0})))})).catch((()=>{})))}),[t,o,n,c])}},22426(e,n,t){"use 
strict";t.d(n,{i:()=>g,E:()=>p});t(62953);var o=t(96540),i=t(34843),a=t(25316),s=t(18790),r=t(84929),A=t(69306),c=t(57377);const l=(0,s.I)((()=>(0,r.tx)()),c.Ay),d=(0,s.I)((e=>{let{spaceId:n,roomId:t}=e;return(0,r.z0)((()=>n&&t?(0,A.bX)({spaceId:n,roomId:t}).then((e=>{let{data:n}=e;return n})):Promise.resolve({})))}),c.Ay);var u=t(46587),h=t(24609),m=t(19186);const g=()=>{const e=(0,u.uW)("id"),n=(0,h.vt)(),t=(0,m.ID)(),[a,s]=(0,i.fp)(l({spaceId:n,roomId:t,currentUserId:e})),r=(0,o.useCallback)((()=>s()),[s]);return{activeConversation:a,setActiveConversation:s,reset:r}},p=()=>{var e;const n=(0,u.uW)("id"),t=(0,h.vt)(),o=(0,m.ID)(),s=d({spaceId:t,roomId:o,currentUserId:n}),r=(0,i.md)((0,a.A)(s)),A=(0,i.Xr)(s);return{loaded:"loading"!==r.state,value:Array.isArray(r.data)?r.data:[],error:"hasError"===r.state?null===(e=r.error)||void 0===e||null===(e=e.response)||void 0===e?void 0:e.data:null,reset:()=>A()}}},92942(e,n,t){"use strict";t.d(n,{A:()=>m});var o=t(64467),i=t(80045),a=(t(89463),t(98992),t(54520),t(3949),t(81454),t(42358)),s=t(69029),r=t(79748),A=t(74848);const c=["bugs"];function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function d(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?l(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):l(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const u=e=>{let{bugKey:n}=e;const t=s.c[n],{description:o,url:i}=t||{};return o?(0,A.jsxs)(a.Flex,{column:!0,gap:1,children:[(0,A.jsx)(a.TextMicro,{children:o}),i?(0,A.jsx)(r.A,{Component:a.TextMicro,href:i,target:"_blank",rel:"noopener noreferrer",as:"a",cursor:"pointer",children:"Check details"}):null]}):null},h=e=>{let{bugs:n}=e;return(0,A.jsxs)(a.Flex,{column:!0,width:{max:"200px"},gap:2,children:[(0,A.jsxs)(a.Flex,{alignItems:"center",gap:2,children:[(0,A.jsx)(a.Icon,{name:"warning_triangle_hollow",color:"error"}),(0,A.jsx)(a.TextMicro,{children:"Bug found"})]}),n.map((e=>(0,A.jsx)(u,{bugKey:e},e)))]})},m=e=>{let{bugs:n}=e,t=(0,i.A)(e,c);const o=n.length>1?"".concat(n.length," bugs found"):"Bug found";return n.length?(0,A.jsx)(a.Tooltip,{allowHoverOnTooltip:!0,content:(0,A.jsx)(h,{bugs:n}),children:(0,A.jsx)(a.Pill,d(d({icon:"warning_triangle_hollow",flavour:"error"},t),{},{children:o}))}):null}},55759(e,n,t){"use strict";t.d(n,{Ay:()=>p,FB:()=>u,TZ:()=>h,mK:()=>g,uL:()=>d});t(98992),t(54520),t(3949);var o=t(64467),i=t(42358),a=t(63084),s=t(74848);function r(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const A="delete-node-dialog",c="deleteNodeDialog",l=e=>{const n=1===e;return"Delete ".concat(e," ").concat(n?"node":"nodes")},d=e=>"Delete ".concat(e," node"),u=e=>{let{name:n,nodesLength:t}=e;return t?1===t&&n?d(n):l(t):""},h=e=>{let{name:n}=e;return(0,s.jsxs)(s.Fragment,{children:["You are about to delete offline node ",(0,s.jsx)("strong",{children:n}),".",(0,s.jsx)("br",{}),"Node will be removed from all rooms and will no longer be accessible. 
All metadata will also be removed.",(0,s.jsx)("br",{}),"Are you sure you want to continue?"]})},m=e=>{let{nodesLength:n}=e;const t=1===n;return(0,s.jsxs)(s.Fragment,{children:["You are about to delete ",t?"this":"these"," ",(0,s.jsxs)("strong",{children:[n," offline ",t?"node":"nodes"]}),".",(0,s.jsx)("br",{}),t?"Node":"Nodes"," will be removed from all rooms and will no longer be accessible. All metadata will also be removed.",(0,s.jsx)("br",{}),"Are you sure you want to continue?"]})},g=e=>{let{name:n,nodesLength:t}=e;return t?1===t&&n?(0,s.jsx)(h,{name:n}):(0,s.jsx)(m,{nodesLength:t}):""},p=e=>{let{ids:n=[],name:t,onClose:u}=e;const g=(0,a.A)(),p=t?{"data-ga":"".concat(A,"-with-name"),"data-testid":"".concat(c,"WithName"),message:(0,s.jsx)(h,{name:t}),title:d(t)}:{"data-ga":"".concat(A,"Bulk"),"data-testid":"".concat(c,"Bulk"),message:(0,s.jsx)(m,{nodesLength:n.length}),title:l(n.length)};return(0,s.jsx)(i.ConfirmationDialog,function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?r(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):r(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({confirmLabel:"Yes, delete",handleConfirm:async()=>{await g(n)},handleDecline:u},p))}},77186(e,n,t){"use strict";t.d(n,{Ay:()=>f,G:()=>m,VN:()=>p,aq:()=>h,fS:()=>d});t(98992),t(54520),t(3949);var o=t(64467),i=t(42358),a=t(4701),s=t(19186),r=t(74848);function A(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const c="remove-node-dialog",l="removeNodeDialog",d=e=>"Remove ".concat(e," node"),u=e=>{const n=1===e;return"Remove ".concat(e," ").concat(n?"node":"nodes")},h=e=>{let{name:n,nodesLength:t}=e;return t?1===t&&n?d(n):u(t):""},m=e=>{let{name:n,roomName:t}=e;return(0,r.jsxs)(r.Fragment,{children:["You are about to remove ",(0,r.jsx)("strong",{children:n})," from room ",(0,r.jsx)("strong",{children:t}),".",(0,r.jsx)("br",{}),"Are you sure you want to continue?"]})},g=e=>{let{nodesLength:n,roomName:t}=e;const o=1===n;return(0,r.jsxs)(r.Fragment,{children:["You are about to remove"," ",(0,r.jsxs)("strong",{children:[n," ",o?"node":"nodes"]})," ","from room ",(0,r.jsx)("strong",{children:t}),".",(0,r.jsx)("br",{}),"Are you sure you want to continue?"]})},p=e=>{let{name:n,nodesLength:t,roomName:o}=e;return t?1===t&&n?(0,r.jsx)(m,{name:n,roomName:o}):(0,r.jsx)(g,{nodesLength:t,roomName:o}):""},f=e=>{let{ids:n=[],name:t,onClose:h}=e;const p=(0,a.A)(),f=n.length,y=(0,s.XA)("name"),b=t?{"data-ga":"".concat(c,"WithName"),"data-testid":"".concat(l,"WithName"),message:(0,r.jsx)(m,{name:t,roomName:y}),title:d(t)}:{"data-ga":"".concat(c,"Bulk"),"data-testid":"".concat(l,"Bulk"),message:(0,r.jsx)(g,{nodesLength:f,roomName:y}),title:u(f)};return(0,r.jsx)(i.ConfirmationDialog,function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?A(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):A(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({handleConfirm:async()=>{await p(n)},handleDecline:h},b))}},37156(e,n,t){"use strict";t.d(n,{A:()=>de});t(98992),t(54520),t(3949);var 
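/*
  Both dialog modules above derive their ConfirmationDialog props from the same
  rule: with exactly one node and a known name, use the singular title and the
  named message; otherwise fall back to the pluralized bulk copy. The selection
  logic, extracted (illustrative helper, not a bundle export):
*/
function dialogTitle({ action, name, count }) {
  // action is "Delete" or "Remove" in the modules above
  if (count === 1 && name) return `${action} ${name} node`;
  return `${action} ${count} ${count === 1 ? "node" : "nodes"}`;
}
// dialogTitle({ action: "Remove", name: "web-01", count: 1 })  -> "Remove web-01 node"
// dialogTitle({ action: "Delete", count: 3 })                  -> "Delete 3 nodes"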
o=t(64467),i=t(96540),a=t(63950),s=t.n(a),r=t(42358),A=t(10602),c=t(89590),l=t(99728),d=(t(81454),t(37550),t(62953),t(41344)),u=t(19186),h=t(24013),m=t(96600),g=t(89284),p=t(16866),f=t(41395),y=t(49181),b=t(74848);const E=e=>{let{icon:n,status:t,handleNavigateToDocs:o,message:i}=e;return(0,b.jsxs)(r.Flex,{column:!0,width:{max:"200px"},gap:2,children:[(0,b.jsxs)(r.Flex,{alignItems:"center",gap:2,children:[(0,b.jsx)(r.Icon,{name:n,size:"small"}),(0,b.jsx)(r.TextMicro,{children:(0,f.Zr)(t)})]}),(0,b.jsxs)(r.Flex,{column:!0,gap:2,children:[(0,b.jsx)(r.TextMicro,{children:i}),o&&(0,b.jsxs)(r.TextMicro,{"data-testid":"text-agent-outdated-critical",children:[(0,b.jsxs)(r.Box,{"data-testid":"open-add-node",onClick:o,as:r.TextMicro,cursor:"pointer",textDecoration:"underline",children:[" ","Check here"," "]}),"for troubleshooting"]})]})]})},w=e=>{let{state:n,name:t}=e;const o=(0,y.jZ)(n),{icon:a,text:s,textColor:A,indicatorWrapperColor:c,tooltip:l}=p.Q$[o.toLocaleLowerCase()],d=(0,i.useCallback)((()=>{window.open("https://learn.netdata.cloud/docs/netdata-cloud/connect-agent-to-cloud#troubleshoot","_blank","noopener,noreferrer")}),[]),u="".concat(t," ").concat(l);return(0,b.jsx)(r.Tooltip,{allowHoverOnTooltip:!0,content:(0,b.jsx)(E,{message:u,icon:a,status:s,handleNavigateToDocs:"Pending"===o?d:void 0}),children:(0,b.jsxs)(r.Flex,{margin:[0,"auto",0,"auto"],height:"20px",padding:[.5,0],border:{side:"all",color:c},background:"nodeBadgeBackground",justifyContent:"center",width:30,round:4,gap:2,alignItems:"center",children:[(0,b.jsx)(r.Icon,{name:a,size:"small",color:A}),(0,b.jsx)(r.TextMicro,{strong:!0,color:A,children:s})]})})};var B=t(90930),C=t(36504),M=t(92942),T=t(45087),I=t(51262),v=t(50979);const _=()=>{const{loaded:e,maxNodes:n}=(0,I.A)();return(0,b.jsx)(T.A,{align:"bottom",content:e?"Your current plan allows you to enable up to ".concat(n," nodes"):"",children:(0,b.jsxs)(r.Flex,{gap:2,children:[(0,b.jsx)(r.Text,{children:"Enabled"}),e?(0,b.jsx)(r.Icon,{name:"informationPress",size:"small",color:"text"}):null]})})},Q=()=>(0,b.jsx)(T.A,{content:"For some reason we couldn't load preferred nodes",align:"bottom",children:(0,b.jsx)(r.Icon,{name:"warning_triangle",color:"warning",size:"small"})}),D=e=>{var n;let{row:t,tempPreferredNodes:o=[],setTempPreferredNodes:a,loading:s}=e;const A=null===(n=t.original)||void 0===n?void 0:n.id,{loaded:c,maxNodes:l,hasError:d,isNodeRestricted:u}=(0,I.A)(),h=o.includes(A),{restricted:m,reason:g}=u(A),[p,f]=(0,i.useState)(!1),y=m&&"ErrWindowsAgentIsNotSupported"===g,E=(0,i.useMemo)((()=>s||!h&&c&&o.length>=l),[s,h,c,o,l]),w=(0,i.useMemo)((()=>E?null:y?v.KV[g]:p?"Disable this node":"Enable this node"),[y,g,p]);(0,i.useEffect)((()=>{f(h)}),[h,f]);const B=(0,i.useCallback)((e=>{f((e=>!e));const n=e.target.checked?[...o,A]:o.filter((e=>e!=A));a(n)}),[f,o,A,a]);return d?(0,b.jsx)(Q,{}):(0,b.jsx)(T.A,{align:"bottom",content:w,children:(0,b.jsx)(r.Box,{children:(0,b.jsx)(r.Toggle,{colored:!E,onChange:B,checked:p,disabled:E,toggleProps:{noTransition:!0}})})})};var x=t(80045),k=t(41708);const R=["showLockedNodes"];function S(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function P(e){for(var n=1;n<arguments.length;n++){var 
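/*
  The connection badge w above is table-driven: the raw node state is first
  normalized to a status, the status indexes a presentation map (icon, label,
  colors, tooltip), and only the "Pending" status gets the troubleshooting docs
  link wired into its tooltip. A sketch of the lookup (the created -> Pending
  mapping is inferred from the filter options defined further down):
*/
const BADGES = {
  pending: { icon: "clock", label: "Pending", color: "warningText" },
  completed: { icon: "check", label: "Completed", color: "successText" },
};

function connectionBadge(state) {
  const status = state === "created" ? "pending" : "completed";
  return {
    ...BADGES[status],
    docsUrl: status === "pending"
      ? "https://learn.netdata.cloud/docs/netdata-cloud/connect-agent-to-cloud#troubleshoot"
      : null,
  };
}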
t=null!=arguments[n]?arguments[n]:{};n%2?S(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):S(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const F=(Y=e=>{let{name:n}=e;return(0,b.jsx)(r.TextSmall,{children:n})},e=>{let{showLockedNodes:n}=e,t=(0,x.A)(e,R);return n&&t.restricted?(0,b.jsx)(T.A,{plain:!0,content:(0,b.jsx)(k.u,{name:t.name,eligibility:t.eligibility}),isBasic:!0,children:(0,b.jsxs)(r.Flex,{gap:1,children:[(0,b.jsx)(r.Icon,{name:"padlock",width:"18px",height:"18px",color:"placeholder"}),(0,b.jsx)(Y,P({},t))]})}):(0,b.jsx)(Y,P({},t))});var Y;const U=e=>{let{getValue:n}=e;const t=n()||[];return(0,b.jsx)(r.Flex,{gap:1,flexWrap:!0,children:t.map((e=>(0,b.jsx)(r.Pill,{flavour:"neutral",hollow:"STATIC"==e,children:e},e)))})},N=()=>(0,b.jsxs)(r.Flex,{column:!0,width:{max:"200px"},gap:2,children:[(0,b.jsxs)(r.TextMicro,{children:[(0,b.jsx)(r.TextMicro,{strong:!0,children:"Static:"})," Node is member of this room as part of the claiming process or through user action."]}),(0,b.jsxs)(r.TextMicro,{children:[(0,b.jsx)(r.TextMicro,{strong:!0,children:"Rule:"})," Node is member of this room when it matches one or more room's rules."]})]}),j=(e,n)=>e===n?0:e>n?1:-1,z=e=>{let{isSpace:n,showAttention:t,tempPreferredNodes:o,setTempPreferredNodes:a,loading:s}=e;const{hasLimitations:A,maxNodes:c,nodesCount:l,preferredNodes:d,isNodeRestricted:u}=(0,I.A)(),h=A&&l>c;return(0,i.useMemo)((()=>[...n&&h?[{id:"enabled",header:_,cell:e=>{let{row:n}=e;return(0,b.jsx)(D,{row:n,tempPreferredNodes:o,setTempPreferredNodes:a,loading:s})},sortingFn:(e,n)=>{var t,o;return(null!==(t=e.original)&&void 0!==t&&t.isPreferred?1:0)-(null!==(o=n.original)&&void 0!==o&&o.isPreferred?1:0)}}]:[],{id:"agent",accessorKey:"name",enableColumnFilter:!1,filterFn:(e,n,t)=>{var o;const i=(null===(o=e.original)||void 0===o?void 0:o.name)||"";return null===i||void 0===i?void 0:i.toLowerCase().includes(t)},header:"Node",headerString:"Node",cell:e=>{let{getValue:n,row:t}=e;const{restricted:i}=u(t.original.id);return(0,b.jsx)(F,{nodeId:t.original.id,name:n(),showLockedNodes:h,preferredNodes:o,restricted:i})}},...t?[{id:"attention",accessorKey:"bugs",header:"Attention",headerString:"Attention",cell:e=>{let{getValue:n}=e;return(0,b.jsx)(r.Flex,{flexWrap:!0,children:(0,b.jsx)(M.A,{bugs:n()})})},sortingFn:(e,n)=>(e.original.bugs||[]).length-(n.original.bugs||[]).length}]:[],{id:"state",accessorKey:"state",header:"Status",headerString:"Status",cell:e=>{let{getValue:n}=e;return(0,b.jsx)(g.A,{state:n()})},sortingFn:(e,n)=>j((0,y.GM)(e.original.state),(0,y.GM)(n.original.state)),enableColumnFilter:!0,filterFn:(e,n,t)=>{const o=e.original.state;return t.length<1||t.some((e=>{let{value:n}=e;return"all"===n||n===(0,y.GM)(o)}))},meta:{filter:{component:"select",isMulti:!0,options:[{value:"Offline",label:"Offline"},{value:"Live",label:"Live"},{value:"Stale",label:"Stale"},{value:"Unseen",label:"Unseen"}]},tooltip:(0,b.jsx)(B.A,{})}},{id:"version",accessorKey:"version",header:"Version",headerString:"Version",cell:e=>{var n,t;let{getValue:o,row:i}=e;return(0,b.jsxs)(r.Flex,{gap:1,flexWrap:!0,children:[(0,b.jsx)(r.Pill,{flavour:"neutral",hollow:!0,children:o()}),i.original.updateSeverity&&(0,b.jsx)(C.A,{name:i.original.name,os:null===(n=i.original.os)||void 0===n?void 0:n.id,container:null===(t=i.original.hw)||void 0===t?void 
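/*
  The preferred-nodes toggle D above combines optimistic local state with a plan
  quota: the switch reflects membership in the draft selection immediately, but
  disables itself while a save is in flight or when the node is not yet selected
  and the selection already fills maxNodes (already-enabled nodes can still be
  turned off). A hook capturing that rule (illustrative names):
*/
import { useCallback, useEffect, useMemo, useState } from "react";

function usePlanLimitedToggle({ id, selected, maxNodes, saving, onChange }) {
  const isOn = selected.includes(id);
  const [checked, setChecked] = useState(isOn);
  useEffect(() => { setChecked(isOn); }, [isOn]);

  const disabled = useMemo(
    () => saving || (!isOn && selected.length >= maxNodes),
    [saving, isOn, selected, maxNodes]
  );

  const toggle = useCallback((event) => {
    setChecked((c) => !c); // optimistic flip; reconciled by the effect above
    onChange(event.target.checked ? [...selected, id] : selected.filter((x) => x !== id));
  }, [id, selected, onChange]);

  return { checked, disabled, toggle };
}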
0:t.container,warningLevel:"critical"===i.original.updateSeverity?"critical":"warning",labels:i.original.labels,version:o(),margin:[1,0]})]})}},{id:"roomMemberships",accessorKey:"roomMemberships",header:"Membership",headerString:"Membership",cell:U,enableColumnFilter:!0,filterFn:(e,n,t)=>{const o=e.original.roomMemberships||[],{value:i}=t||{};return"all"===i||o.includes(i)},meta:{filter:{component:"select",isMulti:!1,options:[{value:"STATIC",label:"Static"},{value:"RULE",label:"Rule"}]},tooltip:N}},{id:"connectionToCloud",accessorKey:"state",header:"Connection To Cloud",cell:e=>{let{getValue:n,row:t}=e;const o=t.original;return(0,b.jsx)(w,{state:n(),name:o.name})},sortingFn:(e,n)=>j((0,y.jZ)(e.original.state),(0,y.jZ)(n.original.state)),enableColumnFilter:!0,filterFn:(e,n,t)=>{const o=e.original.state;return t.length<1||t.some((e=>{let{value:n}=e;return"all"===n||("created"===o&&"created"===n||("created"!==o&&"completed"===n||void 0))}))},meta:{filter:{component:"select",isMulti:!0,options:[{value:"created",label:"Pending"},{value:"completed",label:"Completed"}]}}},{id:"updateSeverity",accessorKey:"updateSeverity",header:"Severity",headerString:"Severity",cell:e=>{var n,t;let{getValue:o,row:i}=e;const a=i.original;return(0,b.jsx)(C.A,{name:a.name,os:null===(n=a.os)||void 0===n?void 0:n.id,container:null===(t=a.hw)||void 0===t?void 0:t.container,warningLevel:a.updateSeverity,labels:a.labels||{},version:a.version,text:o()})}}]),[n,h,o,a,s,A,d,u])};var H=t(6304),O=t(55759),L=t(77186),G=t(63084),J=t(4701),q=t(30811);function K(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const V=e=>Array.isArray(e)?e.filter((e=>{let{hasAccessibleData:n}=e;return!n})):e.hasAccessibleData?[]:[{id:e.id}],X=e=>Array.isArray(e)?e:[{id:e.id}],W=e=>{let{openClaimNodeModal:n,roomUntouchable:t,isSpace:a}=e;const s=(0,G.A)(),A=(0,J.A)(),c=(0,u.XA)("name"),d=(0,l.JT)("node:Delete"),h=(0,l.JT)("room:RemoveNode"),m=(0,l.JT)("node:Create"),g=(0,l.JT)("room:AddNode"),p=(0,u.GJ)(),f=(0,q.l)(),y=(0,i.useCallback)((async(e,n)=>{if(!e)return;const t=V(e).map((e=>{let{id:n}=e;return n}));await s(t,{onSuccess:()=>n.toggleAllRowsSelected(!1)}),f()}),[V,s,f]),E=(0,i.useCallback)((async(e,n)=>{if(!e)return;const t=X(e).map((e=>{let{id:n}=e;return n}));await A(t,{onSuccess:()=>n.toggleAllRowsSelected(!1)})}),[X,A]),w=(0,i.useMemo)((()=>t?"Node removal is not allowed for this room":h?"Remove is disabled":"You do not have permission to remove the node from this room"),[t,h]),B=(0,i.useMemo)((()=>({delete:{confirmLabel:"Yes, delete",confirmationMessage:e=>(0,b.jsx)(O.TZ,{name:e.name}),confirmationTitle:e=>(0,O.uL)(e.name),declineLabel:"Cancel",disabledTooltipText:d?"Delete is disabled":"You do not have permission to delete this node",handleAction:y,isVisible:!!a,isDisabled:e=>e.disabled||!d,tooltipText:"Delete node from space"},remove:{confirmLabel:"Yes, remove",confirmationMessage:e=>(0,b.jsx)(L.G,{name:e.name,roomName:c}),confirmationTitle:e=>(0,L.fS)(e.name),declineLabel:"Cancel",handleAction:E,tooltipText:"Remove node from room",isVisible:!a,isDisabled:e=>e.disabled||t||!h,disabledTooltipText:w}})),[O.TZ,O.uL,L.fS,y,E,L.G,t,w,a,d,h]),C=(0,i.useMemo)((()=>function(e){for(var n=1;n<arguments.length;n++){var 
t=null!=arguments[n]?arguments[n]:{};n%2?K(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):K(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({download:{handleAction:(0,r.downloadCsvAction)("Space nodes"),isDisabled:!1,tooltipText:"Download as CSV",icon:"download",confirmation:!1,alwaysEnabled:!0},addEntry:{disabledTooltipText:p?"This is a local virtual room for your agent and you cannot add more nodes from here. Check the parent-child documentation to add children if you need to.":a?"Only admins can connect new nodes":"Only admins can add nodes to the room",handleAction:n,isDisabled:!p&&(a?!m:!(m||g&&!t)),tooltipText:a?"Connect new nodes to space":"Add nodes to room"}},a?{delete:{confirmLabel:"Yes, delete",confirmationMessage:(e,n)=>(0,b.jsx)(O.mK,{name:n[0].name,nodesLength:n.length}),confirmationTitle:(e,n)=>(0,O.FB)({name:n[0].name,nodesLength:n.length}),disabledTooltipText:d?"Delete is disabled":"Only admins can delete",declineLabel:"Cancel",handleAction:y,isDisabled:!d,tooltipText:"Delete nodes from space"}}:{remove:{confirmLabel:"Yes, remove",confirmationMessage:(e,n)=>(0,b.jsx)(L.VN,{name:n[0].name,nodesLength:n.length,roomName:c}),confirmationTitle:(e,n)=>(0,L.aq)({name:n[0].name,nodesLength:n.length}),declineLabel:"Cancel",disabledTooltipText:t?"Remove is disabled":"Only admins can remove",handleAction:E,isDisabled:t||!h,tooltipText:"Remove nodes from room"}})),[O.mK,O.FB,L.aq,y,E,m,d,h,L.VN,t,a]);return{rowActions:B,bulkActions:C,hasPermissionToDelete:d,hasPermissionToRemove:h}};function Z(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function $(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?Z(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):Z(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const ee=[{id:"state",desc:!1}],ne=e=>{let{flavour:n,roomUntouchable:t,customNodes:o,alwaysEnableNodeSelection:a,tempPreferredNodes:s,setTempPreferredNodes:r,loading:c,isSpace:l}=e;const{roomSlug:g}=(0,d.g)(),p=(0,u.ID)(g),f=(0,A.gr)(p,"ids"),b=(0,h.Gt)(f),E=(0,i.useMemo)((()=>(0,m.P)(o||b)),[o,b]),w=(0,i.useMemo)((()=>E.some((e=>{let{bugs:n}=e;return!!(n||[]).length}))),[E]),B=z({isSpace:l,showAttention:w,tempPreferredNodes:s,setTempPreferredNodes:r,loading:c}),[,C]=(0,i.useState)(""),[M,,T,I]=(0,H.A)(),{rowActions:v,bulkActions:_,hasPermissionToDelete:Q,hasPermissionToRemove:D}=W({openClaimNodeModal:T,roomUntouchable:t,isSpace:l}),x=(0,i.useCallback)(((e,n,t)=>{const o=t.toLowerCase(),i=e.getValue("agent").toLowerCase(),a=(0,y.GM)(e.getValue("state")).toLocaleLowerCase(),s=(0,y.jZ)(e.getValue("connectionToCloud")).toLocaleLowerCase(),r=(e.getValue("updateSeverity")||"").toLocaleLowerCase(),A=e.getValue("version").toLowerCase();return i.includes(o)||a.includes(o)||A.includes(o)||s.includes(o)||r.includes(o)}),[]),k=E.map((e=>{const t=l?!Q||!a&&e.hasAccessibleData:!D;return 
$($({},e),{},{disabled:t||"availableNodes"===n&&!e.isPreferred})}))||[],R=(0,i.useMemo)((()=>({name:!1,updateSeverity:!1,connectionToCloud:!1,roomMemberships:"roomNodes"===n})),[n]);return{nodes:k,nodeIds:f,columns:B,rowActions:v,bulkActions:_,isClaimNodeModalOpen:M,sortBy:ee,columnVisibility:R,setGlobalFilter:C,openClaimNodeModal:T,closeClaimNodeModal:I,globalFilterFn:x}};var te=t(51510),oe=(t(9391),t(63872)),ie=t(94404);const ae=(0,t(74891).A)((0,ie.A)(r.Button)),se=e=>{let{tempPreferredNodes:n,loading:t,onSaveStart:o=s(),onSaveEnd:a=s()}=e;const{hasLimitations:A,maxNodes:c,preferredNodes:l,nodesCount:d}=(0,I.A)(),u=(0,h.je)(),{refresh:m}=(0,h.Du)(),[g,p]=(0,oe.A)(),f=A&&d>c,y=(0,i.useMemo)((()=>!function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];if(e.length!==n.length)return!1;for(let t=0;t<e.length;t++)if(!n.includes(e[t]))return!1;return!0}(l,n)),[l,n]),E=t||!y,w=(0,i.useCallback)((()=>{o(),u(n).then((()=>{m(),g({header:"Saved preferred nodes",text:"Preferred nodes successfully updated!"})})).catch((e=>{p(e)})).finally(a)}),[n,u,m,o,a,g,p]);return f?(0,b.jsxs)(r.Flex,{gap:2,alignItems:"center",children:[(0,b.jsx)(ae,{feature:"PreferredNodesSettings",onClick:w,disabled:E,label:"Save",isLoading:t,tooltip:E?null:"Save preferred nodes",small:!0}),y?(0,b.jsxs)(r.Flex,{gap:1,alignItems:"center",children:[(0,b.jsx)(r.Icon,{name:"warning_triangle",color:"warning"}),(0,b.jsx)(r.Text,{children:"You have unsaved changes!"})]}):null]}):null},re=(0,te.default)(r.Box).withConfig({displayName:"activeNodesIndicator__ProgressBar",componentId:"sc-1gi3ps2-0"})(["position:absolute;left:0;height:4px;"]),Ae=e=>{let{isSpace:n,tempPreferredNodes:t,loading:o,startLoading:a,stopLoading:s}=e;const{nodesCount:A,hasLimitations:c,maxNodes:l}=(0,I.A)(),d=n&&c&&A>l,u=(0,i.useMemo)((()=>l>0?"".concat(t.length/l*100,"%"):"0%"),[t,l]);return d?(0,b.jsxs)(r.Flex,{gap:2,children:[(0,b.jsxs)(r.Flex,{width:"140px",column:!0,gap:1,children:[(0,b.jsxs)(r.Text,{children:["Active nodes: ",t.length,(0,b.jsxs)(r.Text,{color:"primary",children:["/",l]})]}),(0,b.jsx)(r.Box,{width:"100%",height:"4px",round:!0,overflow:"hidden",position:"relative",background:"border",children:(0,b.jsx)(re,{width:u,background:"primary"})})]}),(0,b.jsx)(se,{loading:o,tempPreferredNodes:t,onSaveStart:a,onSaveEnd:s})]}):null};function ce(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function le(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?ce(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):ce(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const 
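/*
  The Save button in se above is enabled only when the draft differs from the
  saved preferred-node list, using an order-insensitive comparison: same length
  and every element of one list contained in the other. The bundle's inline
  version calls includes() per element (quadratic); a Set makes it linear.
  Both variants assume the lists carry no duplicate ids:
*/
function sameMembers(a = [], b = []) {
  if (a.length !== b.length) return false;
  const set = new Set(b);
  return a.every((item) => set.has(item));
}

const isDirty = (saved, draft) => !sameMembers(saved, draft);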
de=e=>{let{flavour:n,roomUntouchable:t,showClaimNodeOnEmptySpace:o=!1,customNodes:a=[],customBulkActions:d,customRowActions:u,enableSelection:h=!0,showDefaultRowActions:m=!0,showDefaultBulkActions:g=!0,onRowSelected:p,onClickRow:f,disableClickRow:y,columnVisibility:E,customSortBy:w,showClaimModalWithRoomSelection:B=!1,alwaysEnableNodeSelection:C=!1,tempPreferredNodes:M=[],setTempPreferredNodes:T=s(),loading:I,startLoading:v=s(),stopLoading:_=s(),isSpace:Q}=e;const{nodes:D,columns:x,rowActions:k,bulkActions:R,sortBy:S,isClaimNodeModalOpen:P,openClaimNodeModal:F,closeClaimNodeModal:Y,columnVisibility:U,setGlobalFilter:N,globalFilterFn:j}=ne({flavour:n,roomUntouchable:t,customNodes:a,alwaysEnableNodeSelection:C,isSpace:Q,showClaimModalWithRoomSelection:B,tempPreferredNodes:M,setTempPreferredNodes:T,loading:I}),z=(0,l.JT)("node:Create"),H=(0,A.nj)();(0,i.useEffect)((()=>{H&&o&&z&&0===D.length&&F()}),[H]);const O=(0,i.useMemo)((()=>le(le({},U),E)),[U,E]);return x.length?(0,b.jsxs)(b.Fragment,{children:[(0,b.jsx)(r.Table,{headerChildren:(0,b.jsx)(Ae,{isSpace:Q,tempPreferredNodes:M,loading:I,startLoading:v,stopLoading:_}),onRowSelected:p,globalFilterFn:j,columnVisibility:O,enableSelection:h,enableSorting:!0,dataColumns:x,data:D,rowActions:m&&k||u,bulkActions:g&&R||d,sortBy:w||S,onSearch:N,testPrefixCallback:e=>e.hostname||e.name,onClickRow:f,disableClickRow:y}),!!P&&(0,b.jsx)(c.A,{onClose:Y,showRoomSelector:B})]}):null}},17662(e,n,t){"use strict";t.d(n,{A:()=>y,f:()=>p});var o=t(64467),i=(t(46449),t(93514),t(98992),t(54520),t(3949),t(8872),t(96540)),a=t(56171),s=t(42358),r=t(67462),A=t(16866),c=t(53949),l=t(24609),d=t(40545),u=t(24013),h=t(74848);function m(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function g(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?m(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):m(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const p={info:{background:"successSemi",isDismissable:!0,icon:"information",iconColor:"text"},warning:{background:"warningBackground",isDismissable:!0,icon:"warning_triangle_hollow",iconColor:"warning"},critical:{background:"errorBackground",isDismissable:!0,icon:"warning_triangle_hollow",iconColor:"error"}},f=e=>{let{iconColor:n,icon:t,warningLevel:o,numberOfNodesWithCriticalSeverity:i,onClickUpdate:a}=e;return(0,h.jsxs)(s.Flex,{justifyContent:"center",alignItems:"center",width:"100%",gap:2,children:[(0,h.jsx)(s.Icon,{"data-testid":"icon-banner-agent-outdated-".concat(o),color:n,name:t})," ",(0,h.jsxs)(s.Text,{"data-testid":"text-agent-outdated-".concat(o),color:"main",children:[i," ",(0,r.su)(i)," ",i>1?"are":"is"," below the recommended Agent version"," ",A.Qy,"."," ",(0,h.jsx)(s.Box,{"data-testid":"open-add-node",onClick:a,as:s.Text,cursor:"pointer",textDecoration:"underline",color:"main",children:"Please update them"})," ","to ensure you get the latest security bug fixes."]})]})},y=e=>{let{warningLevel:n,numberOfNodesWithCriticalSeverity:t,onClose:o,onClickUpdate:s}=e;const{background:r,isDismissable:A,icon:m,iconColor:y}=p[n],b=(()=>{const{id:e}=(0,d.A)(),n=(0,l.vt)(),t=(0,u.Gn)(),o=(0,u.Gt)(t),a=Object.entries(o.reduce(((e,n)=>{let{version:t}=n;return 
g(g({},e),{},{[t]:(e[t]||0)+1})}),{})).flat().join("_");return(0,i.useCallback)((()=>"dismissed-agent-version-manager-banner-".concat(e,"-").concat(n,"-").concat(a)),[e,n,a])})(),{dismissed:E,onClose:w}=(0,c.A)({getLocalStorageKey:b,logKey:"AgentVersionManagerDismiss"}),B=(0,i.useCallback)((()=>{w(),o&&o()}),[w]);return E?null:(0,h.jsx)(a.A,{background:r,onClose:A?B:null,children:(0,h.jsx)(f,{iconColor:y,icon:m,warningLevel:n,numberOfNodesWithCriticalSeverity:t,onClickUpdate:s})})}},58042(e,n,t){"use strict";t.d(n,{Ay:()=>C,OS:()=>w,f7:()=>B});var o=t(64467),i=(t(98992),t(54520),t(3949),t(62953),t(96540)),a=t(52035),s=t(34843),r=t(18790),A=t(1817),c=t(19186),l=t(46587),d=t(47090),u=t(41936),h=t(16015),m=t(32886),g=t(74132),p=t(57377);function f(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function y(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?f(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):f(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const b=(0,r.I)((e=>{let{machineGuid:n=window.localNetdataRegistry.mg}=e;return(0,a.eU)({loading:!1,token:localStorage.getItem("agentJWT:".concat(n))||"",expiration:localStorage.getItem("agentJWTExp:".concat(n))||null,bearerProtection:!0,error:""})}),p.Ay),E=(0,r.I)((()=>(0,a.eU)(1))),w=()=>{const[{mg:e}]=(0,u.RJ)(),n=(0,s.Xr)(E(e));return(0,i.useCallback)((()=>{localStorage.removeItem("agentJWT:".concat(e)),localStorage.removeItem("agentJWTExp:".concat(e)),n((e=>e+1))}),[e])},B=()=>{const[{claimId:e,mg:n,nd:t}]=(0,u.RJ)();return(0,s.fp)(b({nodeId:t,machineGuid:n,claimId:e}))},C=()=>{const e=(0,c.GJ)(),[{claimId:n,mg:t,nd:o}]=(0,u.RJ)(),a=w(),[r,p]=(0,s.fp)(E(t)),[{loading:f,token:b,expiration:C,bearerProtection:M,error:T},I]=B(),v=(0,A.A)(r),_=(0,l.uW)("isAnonymous");return(0,i.useEffect)((()=>{"function"===typeof a&&(0,g.x)(a)}),[a]),(0,i.useEffect)((()=>{!f&&t&&e&&!_&&n&&o&&(localStorage.getItem("agentJWT:".concat(t))||r===v&&C&&1e3*C>(new Date).getTime()+3600||(I((e=>y(y({},e),{},{loading:!0}))),(0,h.q5)(o,t,n).then((e=>{let{data:n}=e;localStorage.setItem("agentJWT:".concat(t),null===n||void 0===n?void 0:n.token),localStorage.setItem("agentJWTExp:".concat(t),null===n||void 0===n?void 0:n.expiration),I(y(y({loading:!1},n),{},{error:""}))})).catch((e=>{var n;const o=null===e||void 0===e||null===(n=e.response)||void 0===n?void 0:n.data;localStorage.removeItem("agentJWT:".concat(t)),localStorage.removeItem("agentJWTExp:".concat(t)),I({loading:!1,token:"",expiration:null,bearerProtection:!0,error:(0,d.o)(null===o||void 0===o?void 0:o.errorMsgKey)||(null===o||void 0===o?void 0:o.errorMessage)||"Something went wrong"})}))))}),[t,o,C,r,e,_,v]),(0,m.A)(t),{token:b,bearerProtection:M,checkAgain:p,error:T}}},3458(e,n,t){"use strict";t.d(n,{A8:()=>C,An:()=>w,Jq:()=>B,Qb:()=>g,Wb:()=>p,b8:()=>m,eQ:()=>f,s2:()=>M,t9:()=>b});var o=t(64467),i=t(80045),a=(t(98992),t(54520),t(3949),t(81454),t(8872),t(91130)),s=t(41395),r=t(15505),A=t(52838);const c=["id"],l=["id"];function d(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function u(e){for(var 
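/*
  Module 58042 above caches a per-agent JWT in localStorage under
  "agentJWT:<machineGuid>" together with its expiration, and only refetches when
  no token is cached or the recorded expiry is too close. Note the freshness
  guard compares expiration * 1000 (seconds -> ms) against Date.now() + 3600,
  i.e. a 3.6-second buffer; if an hour was intended, that constant would need to
  be 3600 * 1000. The guard, extracted (fetchToken is a caller-supplied function,
  not a bundle export):
*/
function getCachedToken(machineGuid, bufferMs = 3600 * 1000) {
  const token = localStorage.getItem(`agentJWT:${machineGuid}`);
  const expiration = Number(localStorage.getItem(`agentJWTExp:${machineGuid}`)); // seconds
  return token && expiration * 1000 > Date.now() + bufferMs ? token : null;
}

async function ensureToken(machineGuid, fetchToken) {
  const cached = getCachedToken(machineGuid);
  if (cached) return cached;
  try {
    const { token, expiration } = await fetchToken();
    localStorage.setItem(`agentJWT:${machineGuid}`, token);
    localStorage.setItem(`agentJWTExp:${machineGuid}`, String(expiration));
    return token;
  } catch (err) {
    // mirror the bundle: clear the cache on failure so the next call retries
    localStorage.removeItem(`agentJWT:${machineGuid}`);
    localStorage.removeItem(`agentJWTExp:${machineGuid}`);
    throw err;
  }
}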
n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?d(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):d(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const h=e=>{let{integrations:n}=e;return n.reduce(((e,n)=>{var t;const o=(0,r.bn)(n),i=o.available?"available":"unavailable";var a;(o.kindLabel=(0,s.Zr)(o.kind,!0),o.docsLink=null===(t=o.schema)||void 0===t||null===(t=t.annotations)||void 0===t?void 0:t.docsUrl,o.internal)||(o.fields=(0,A.Rm)(o.schema),o.required=(null===(a=o.schema)||void 0===a?void 0:a.required)||[]);return e[i].push(o),e}),{available:[],unavailable:[],original:n})},m=e=>a.A.get("/api/v2/spaces/".concat(e,"/integrations"),{transform:h}),g=(e,n)=>a.A.post("/api/v2/spaces/".concat(e,"/channel"),n),p=(e,n)=>a.A.delete("/api/v2/spaces/".concat(e,"/channel/").concat(n)),f=(e,n,t)=>a.A.put("/api/v2/spaces/".concat(e,"/channel/").concat(n),t),y=e=>{var n;const t=e.integration||{},{id:o}=t,a=(0,i.A)(t,c),s=u(u({},(0,r.bn)(e)),(0,r.bn)(a));return s.docsLink=null===(n=s.schema)||void 0===n?void 0:n.annotations.docsUrl,s.internal||(s.fields=(0,A.Rm)(s.schema),s.required=s.schema.required||[]),s},b=(e,n)=>a.A.get("/api/v2/spaces/".concat(e,"/channel/").concat(n),{transform:y}),E=e=>({channels:e.map((e=>{var n;const t=e.integration||{},{id:o}=t,a=(0,i.A)(t,l);return u(u(u({},(0,r.bn)(e)),a||{}),{},{kindLabel:(0,s.Zr)(null===(n=e.integration)||void 0===n?void 0:n.kind,!0)})})),original:e}),w=e=>a.A.get("/api/v2/spaces/".concat(e,"/channel"),{transform:E}),B=(e,n,t)=>a.A.patch("/api/v2/spaces/".concat(e,"/channel/").concat(n),{enabled:t}),C=function(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return a.A.post("/api/v2/spaces/".concat(e,"/notifications/test"),n)},M=()=>a.A.get("/api/v2/notification-options")},42340(e,n,t){"use strict";t.d(n,{A:()=>c,V:()=>A});t(98992),t(54520),t(3949);var o=t(64467),i=t(18790),a=t(84929);function s(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function r(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?s(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):s(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const A={additionalProperties:!1,alerts:"ALARMS_SETTING_ALL",description:"",enabled:!0,error:"",fields:[],required:[],icon:"",internal:!0,id:"",integration:"",integrationId:"",kind:"",loading:!0,loaded:!1,name:"",rooms:null,roomSelections:[t(49635).PT],repeatNotificationMin:"",secrets:{}},c=(0,i.I)((e=>(0,a.Wj)((()=>(e=>r(r({},A),{},{id:e}))(e)))))},76933(e,n,t){"use strict";t.d(n,{A:()=>c,y:()=>A});t(98992),t(54520),t(3949);var o=t(64467),i=t(18790),a=t(84929);function s(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function r(e){for(var n=1;n<arguments.length;n++){var 
n=1;n<arguments.length;n++){var
t=null!=arguments[n]?arguments[n]:{};n%2?s(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):s(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const A={currentChannelId:"",channels:[],error:"",id:null,loading:!0,loaded:!1,original:[]},c=(0,i.I)((e=>(0,a.Wj)((()=>(e=>r(r({},A),{},{id:e}))(e)))))},41258(e,n,t){"use strict";t.d(n,{A:()=>l});t(98992),t(54520),t(3949);var o=t(64467),i=t(3458),a=t(76571),s=t(76933),r=t(32052);function A(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function c(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?A(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):A(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const l=e=>{const n=(0,a.vq)(e);(0,r.A)((()=>({enabled:!!e,fetch:()=>(0,i.An)(e),onFail:e=>n(c(c({},s.y),{},{error:e.message})),onSettle:()=>n({loading:!1,loaded:!0}),onSuccess:e=>n(c(c({},s.y),e))})),[e])}},71819(e,n,t){"use strict";t.d(n,{A:()=>s});t(98992),t(81454);var o=t(96540),i=t(48301),a=t(41395);const s=()=>{const{loaded:e,value:n,hasError:t}=(0,i.mi)(),s=(0,o.useMemo)((()=>e&&!t?function(){return(arguments.length>0&&void 0!==arguments[0]?arguments[0]:[]).map((e=>({value:e,label:(0,a.Zr)(e.toLowerCase())})))}(n):[]),[e,n,t]);return s}},65746(e,n,t){"use strict";t.d(n,{A:()=>c,u:()=>A});t(98992),t(54520),t(3949);var o=t(64467),i=t(18790),a=t(84929);function s(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function r(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?s(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):s(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const A={available:[],error:"",id:null,loading:!0,loaded:!1,original:[],unavailable:[]},c=(0,i.I)((e=>(0,a.Wj)((()=>(e=>r(r({},A),{},{id:e}))(e)))))},48301(e,n,t){"use strict";t.d(n,{mi:()=>R,yP:()=>k,j$:()=>x});var o=t(64467),i=t(80045),a=(t(98992),t(54520),t(3949),t(62953),t(96540)),s=t(52035),r=t(34843),A=t(18790),c=t(25316),l=t(30569),d=t(63872),u=t(91130),h=t(57377),m=t(3458);const g=["domain"];function p(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function f(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?p(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):p(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const 
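/* Async atom: fetches the global notification options (m.s2 -> GET /api/v2/notification-options), resolving to null on failure. */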
y=(0,s.eU)((()=>(0,m.s2)().then((e=>e)).catch((()=>null)))),b={email:null,browser:null,mobile:null},E={alerts:null},w={me:()=>"/api/v2/accounts/me/notifications/settings",room:e=>{let{spaceId:n,roomId:t}=e;return"/api/v2/spaces/".concat(n,"/rooms/").concat(t,"/notifications/settings")}},B=e=>{let{domain:n}=e,t=(0,i.A)(e,g);return(0,w[n])(t)},C=(0,A.I)((e=>{const n=(e=>{let{domain:n}=e;return{me:b,room:E}[n]})(e),t=(0,s.eU)(n);return t.onMount=t=>{const o=B(e);u.A.get(o).then((e=>t(f(f(f({},n),e.data),{},{isLoaded:!0})))).catch((()=>t(f(f({},n),{},{isLoaded:!0}))))},t}),h.Ay);var M=t(68904);const T=["prop"];function I(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function v(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?I(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):I(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const _=(0,A.I)((e=>(0,s.eU)((n=>n(C(e))),((n,t,o)=>{const{prop:a}=e,s=(0,i.A)(e,T);t(C(s),(e=>{const n="object"!==typeof e[a]||Array.isArray(e[a])?o:v(v({},e[a]),{},{enabled:o});return a?v(v({},e),{},{[a]:n}):n}))}))),h.Ay),Q=function(e){let{shouldPersist:n=!0}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const t=(e=>(0,M.A)((function(){let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return u.A.patch(B(e),n)}),[e.domain,e.spaceId,e.roomId]))(e),[,o]=(0,d.A)();return(0,l.yF)((0,a.useCallback)((async(i,a,s)=>{if(a(_(e),s),!n)return;const r=await i(_(e)),{prop:A}=e;try{await t(v({},A?{[A]:s}:s))}catch(c){if(null!==c&&void 0!==c&&c.isCancel)return;o(c),a(_(e),A?r[A]:r)}}),[]))},D=e=>(0,r.md)(_(e)),x=e=>{const n={domain:"me"},t=D(n),o=Q(v(v({},n),{},{prop:e}));return[e?t[e]:t,o]},k=e=>{let{roomId:n,spaceId:t,key:o}=e;const i={domain:"room",roomId:n,spaceId:t},a=D(i),s=Q(v(v({},i),{},{prop:o}));return[o?a[o]:a,s]},R=()=>{var e;const n=(0,r.md)((0,c.A)(y));return{loaded:"loading"!==n.state,value:(null===(e=n.data)||void 0===e?void 0:e.data)||[],hasError:"hasError"===n.state}}},76571(e,n,t){"use strict";t.d(n,{EE:()=>_,Mw:()=>w,bY:()=>Q,ef:()=>M,g4:()=>E,m$:()=>v,t5:()=>C,vq:()=>T});var o=t(64467),i=(t(98992),t(54520),t(3949),t(62953),t(96540)),a=t(52035),s=t(34843),r=t(18790),A=t(30569),c=t(42340),l=t(76933),d=t(65746),u=t(52838),h=t(24609),m=t(3458),g=t(63872),p=t(57377);function f(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function y(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?f(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):f(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const b=(0,r.I)((e=>{let{id:n,key:t}=e;return(0,a.eU)((e=>{const o=e((0,c.A)(n));return t?o[t]:o}),((e,o,i)=>{o((0,c.A)(n),(e=>{const n="function"===typeof i?i(t?e[t]:e):i;return t?y(y({},e),{},{[t]:n}):y(y({},e),n)}))}))}),p.Ay),E=(e,n)=>(0,s.md)(b({id:e,key:n})),w=e=>(0,A.AY)(b(e)),B=(0,r.I)((e=>{let{id:n,key:t}=e;return(0,a.eU)((e=>{let 
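/* Derived read/write atom over a space's channel list; the read side runs the channels through kz, which appears to hide one feature-flagged channel id. */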
o=e((0,l.A)(n));return o=y(y({},o),{},{channels:(0,u.kz)(o.channels)}),t?o[t]:o}),((e,o,i)=>{o((0,l.A)(n),(e=>t?y(y({},e),{},{[t]:i}):y(y({},e),i)))}))}),p.Ay),C=(e,n)=>(0,s.md)(B({id:e,key:n})),M=(e,n)=>(0,s.Xr)(b({id:e,key:n})),T=(e,n)=>(0,s.Xr)(B({id:e,key:n})),I=(0,r.I)((e=>{let{id:n,key:t}=e;return(0,a.eU)((e=>{let o=e((0,d.A)(n));return o=y(y({},o),{},{available:(0,u.kz)(o.available),unavailable:(0,u.kz)(o.unavailable)}),t?o[t]:o}),((e,o,i)=>{o((0,d.A)(n),(e=>t?y(y({},e),{},{[t]:i}):y(y({},e),i)))}))}),p.Ay),v=(e,n)=>(0,s.md)(I({id:e,key:n})),_=(e,n)=>(0,s.Xr)(I({id:e,key:n})),Q=()=>{const e=(0,h.vt)(),[n,t]=(0,g.A)();return(0,i.useCallback)((function(){let{id:o,slug:i,secrets:a={}}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return(0,m.A8)(e,{channelID:o,integrationSlug:i,secrets:a}).then((()=>{n({header:"Success",text:"Successfully sent test notification"})})).catch((e=>{var n;const o=e.errorMessage||(null===(n=e.response)||void 0===n||null===(n=n.data)||void 0===n?void 0:n.errorMessage)||"Something went wrong";t({header:"Error",text:o})}))}),[e])}},52838(e,n,t){"use strict";t.d(n,{kz:()=>B,Zv:()=>g,ct:()=>p,O5:()=>f,Rm:()=>y,$Q:()=>b,Pl:()=>E,s7:()=>w});var o=t(64467),i=(t(89463),t(98992),t(54520),t(72577),t(3949),t(8872),t(62953),t(80045)),a=(t(27495),t(81454),t(90179)),s=t.n(a),r=t(49635);const A=["description","placeholder","title","type"];function c(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function l(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?c(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):c(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const d=(e,n,t)=>{const{description:o,placeholder:a,title:c,type:d}=n,u=(0,i.A)(n,A),h=t.includes(e),m="integer"==d,g={description:o,id:e,isRequired:h,placeholder:a||"",title:c||e};if(("string"===d||m)&&"selection"!==e){const{format:e,maxLength:n,minLength:t}=u;return l(l(l({component:"input",getValue:e=>{let{id:n,secrets:t,subsetId:o}=e;return(o?t[n][o]:t[n])||""},isValid:o=>!!("uri"!==e||null!==o&&void 0!==o&&o.match(r.rx))&&(!(t&&o.length<t)&&!(n&&o.length>n))},n?{maxlength:n}:{}),t?{minlength:t}:{}),{},{onChange:e=>{let{id:n,setSecrets:t,subsetId:o}=e;return e=>t((t=>{if(m){const n=parseInt(e,10);e=isNaN(n)?0:n}return l(l({},t),{},o?{[n]:l(l({},t[n]),{},{[o]:e})}:{[n]:e})}))},type:m?"number":"uri"===e?"url":e||"text"},g)}if("object"===d){const{oneOf:e,patternProperties:n}=u;if(e)return l(l({component:"select",getValue:n=>{var t,o;let{id:i,secrets:a}=n;return{label:(null===(t=e.find((e=>{var n;return e.properties.selection.const===(null===(n=a[i])||void 0===n?void 0:n.selection)})))||void 0===t?void 0:t.title)||(h?e[0].title:"None"),value:(null===(o=a[i])||void 0===o?void 0:o.selection)||(h?e[0].properties.selection.const:"")}},onChange:n=>{let{id:t,setRequiredSubsets:o,setSecrets:i}=n;return n=>{let{label:a,value:r}=n;if(!r)return o((e=>s()(e,t))),void i((e=>s()(e,t)));const A=e.find((e=>e.title===a));null!==A&&void 
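/* If the selected oneOf option declares its own required fields, register them via setRequiredSubsets before storing the selection value. */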
0!==A&&A.required&&o((n=>l(l({},n),{},{[t]:e.find((e=>e.title===a)).required}))),i((e=>l(l({},e),{},{[t]:{selection:r}})))}}},e.reduce(((e,n)=>({fields:l(l({},e.fields),{},{[n.properties.selection.const]:y(n)}),options:[...e.options,{label:n.title,value:n.properties.selection.const}],required:l(l({},e.required),{},{[n.properties.selection.const]:n.required})})),{fields:{},options:h?[]:[{label:"None",value:""}],required:{}})),g);if(n)return l({component:"pairs",componentPairs:Object.entries(n).map((e=>{let[n,{type:t}]=e;return[r.C4[n],r.dZ[t]]}))},g)}return null};var u=t(78152);function h(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function m(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?h(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):h(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const g=(e,n)=>{const t=e=>e.toLowerCase().includes(n.toLowerCase()||"");return e.filter((e=>{let{description:n,kind:o,title:i}=e;return t(n)||t(o)||t(i)}))},p=(e,n,t)=>!!n&&("selection"===e||("select"===t.component||t.isValid(n))),f=e=>"Delete ".concat(e," channel"),y=e=>{let{properties:n,required:t}=e;return Object.keys(n).reduce(((e,o)=>{const i=d(o,n[o],t);return m(m({},e),i?{[o]:i}:{})}),{})},b=e=>{const[n,t]=e;return n===t?"".concat(n,"s"):"".concat(n,"/").concat(t)},E=e=>{let{roomIds:n,roomOptions:t}=e;return n.reduce(((e,n)=>{const o=t.find((e=>e.value===n));return o&&e.push(o.label),e}),[]).join(", ")},w=(e,n)=>Object.values(e).reduce(((e,t)=>"select"===t.component&&n.includes(t.id)?m(m({},e),{},{[t.id]:{selection:t.options[0].value}}):e),{}),B=e=>u.Mh?e:e.filter((e=>{let{id:n}=e;return n!==r.D_}))},97394(e,n,t){"use strict";t.d(n,{E_:()=>s,TZ:()=>i,xL:()=>a});var o=t(52035);const i=(0,o.eU)(null),a=(0,o.eU)(!1),s=(0,o.eU)({})},31246(e,n,t){"use strict";t.d(n,{JF:()=>i,a7:()=>r,aL:()=>o});const o={welcome:"welcome",firstNodeConnected:"firstNodeConnected",firstAlertTriggered:"firstAlertTriggered",firstNotificationSent:"firstNotificationSent",secondNodeConnected:"secondNodeConnected",threeOrMoreNodes:"threeOrMoreNodes",firstTeamMemberInvited:"firstTeamMemberInvited",firstDashboardCreated:"firstDashboardCreated"},i=(o.welcome,o.firstNodeConnected,o.firstAlertTriggered,o.firstNotificationSent,o.secondNodeConnected,o.threeOrMoreNodes,o.firstTeamMemberInvited,o.firstDashboardCreated,{homelabber:"homelabber",smallOrg:"smallOrg",largeOrg:"largeOrg"}),a=[i.homelabber,i.smallOrg,i.largeOrg],s=[i.smallOrg,i.largeOrg],r={[o.welcome]:[{id:"invite-team",title:"Invite a team member",description:"Collaborate on your infrastructure with colleagues",route:"settings/users#userManagementActiveTab=0",isSpaceRoute:!0,segments:s,permission:"space:InviteUser",spotlight:{targetId:"invite-user-button",title:"Invite Team Members",description:"Click here to invite colleagues to collaborate on your infrastructure",position:"left"}},{id:"setup-sso",title:"Configure SSO",description:"Set up single sign-on for your team",route:"settings/users#userManagementActiveTab=1",isSpaceRoute:!0,segments:[i.largeOrg],permission:"oidc:Manage"},{id:"browse-integrations",title:"Browse integrations",description:"Discover data collectors and notification 
channels",openIntegrations:!0,segments:a},{id:"create-rooms",title:"Create Rooms",description:"Organize nodes into groups for easier management",route:"settings/rooms",isSpaceRoute:!0,segments:s,permission:"room:Create",spotlight:{targetId:"create-room-button",title:"Create a Room",description:"Click here to create a room and organize your nodes into groups",position:"left"}},{id:"set-theme",title:"Customize appearance",description:"Set your theme, organize spaces, and choose chart design",openProfileModal:"theme",segments:a,spotlight:{steps:[{targetId:"appearance-theme",title:"Choose Your Theme",description:"Select light or dark mode to match your preference. Dark mode is enabled by default.",position:"left"},{targetId:"appearance-charts",title:"Charts Design",description:"Pick between the default full-detail charts or a minimal design for a cleaner look.",position:"left"}]}},{id:"configure-preferences",title:"Set your preferences",description:"Configure refresh mode, data display, and number format",openProfileModal:"preferences",segments:a,isFeatureAnnouncement:!0,spotlight:{steps:[{targetId:"preferences-refresh",title:"Refresh Mode",description:"Standard Play pauses refreshes when the tab loses focus. Force Play keeps refreshing continuously \u2014 useful for monitoring dashboards on secondary screens.",position:"left"},{targetId:"preferences-locale",title:"Number and Date Format",description:"Choose how numbers, dates, and times are formatted. This setting applies throughout the entire application.",position:"left"}]}}],[o.firstNodeConnected]:[{id:"explore-metrics",title:"Explore infrastructure metrics",description:"View real-time charts and learn to filter and analyze your data",route:"overview",segments:a,permission:"insights:CreateReport",spotlight:{steps:[{selector:"#toc-menu-wrapper-overview-default",title:"Your Metrics Dashboard",description:"This is your infrastructure overview. Charts are organized by category showing CPU, memory, network, disk, and application metrics in real-time.",position:"left"},{selector:'[data-testid="chart"] [data-testid="chartFilters"]',title:"Filter Your Data (NIDL)",description:"Use N-I-D-L filters to slice metrics by Nodes, Instances, Dimensions, and Labels. 
Each filter shows contribution % and anomaly rates.",position:"top"},{selector:'[data-testid="chart"] [data-testid="chartHeaderToolbox-chartType"]',title:"Change Visualization",description:"Switch between line charts, stacked areas, heatmaps, bar charts, and more to visualize your data differently.",position:"top"},{selector:'[data-testid="chart"] [data-testid="chartHeaderToolbox-information"]',title:"Chart Information",description:"View details about this metric including its source, update frequency, and available dimensions.",position:"top"},{selector:'[data-testid="chart"] [data-testid="chartHeaderToolbox-settings"]',title:"Chart Settings",description:"Customize the chart display including value range, number format, and visibility of chart elements.",position:"top"},{selector:'[data-testid="chart"] [data-testid="chartHeaderToolbox-download"]',title:"Download Data",description:"Export the chart data as CSV, PDF, or PNG for reports and offline analysis.",position:"top"},{selector:'[data-testid="chart"] [data-testid="chartHeaderToolbox-addSettings"]',title:"Save Your Settings",description:"Save your chart customizations for personal use, or share them across the room or space for your team.",position:"top"},{selector:'[data-testid="chart"] [data-testid="chartHeaderToolbox-drag"]',title:"Add to Dashboard",description:"Drag and drop this chart into a custom dashboard to build your personalized monitoring view.",position:"top"}]}},{id:"use-netdata-ai",title:"Investigate with AI",description:"Ask AI about performance issues across your infrastructure",segments:a,isFeatureAnnouncement:!0,permission:"insights:CreateReport",spotlight:{steps:[{selector:'[data-testid="workspaceBar-ai-button"]',title:"Netdata AI Assistant",description:"Click here to open Netdata AI and ask questions about your infrastructure, alerts, or metrics.",position:"right"},{selector:'[data-testid="chat-ai-welcome-view-composer"]',title:"Ask Anything",description:"Type your question here. Ask about performance issues, anomalies, or get help understanding your metrics.",position:"bottom",openAiPanel:!0},{selector:'[data-testid="chat-ai-sample-items"]',title:"Quick Prompts",description:"Use these suggestions to quickly ask common questions about your infrastructure.",position:"top"},{selector:'[data-testid="chat-ai-tab-conversations"]',title:"Previous Conversations",description:"Access your chat history here. Continue previous conversations or review past insights.",position:"bottom"}]}},{id:"view-logs",title:"View your logs",description:"Check application and system logs",route:"logs",segments:a},{id:"review-alerts",title:"Review auto-configured alerts",description:"alerts are already monitoring your system. Review and customize them",route:"alerts",dynamicDescription:!0,segments:a,alertTabIndex:1},{id:"use-time-picker",title:"Adjust time range",description:"Use the date/time picker to analyze historical data",route:"overview",segments:a,spotlight:{targetId:"time-picker",title:"Time Range Picker",description:"Adjust the time window to analyze historical data or zoom into specific periods. 
Note: The picker is disabled on pages where time selection doesn't affect the displayed content.",position:"bottom"}}],[o.firstAlertTriggered]:[{id:"view-alert-details",title:"View alert details",description:"See what triggered the alert and its current status",route:"alerts",segments:a,openFirstAlert:!0,alertTabIndex:0},{id:"ai-troubleshoot",title:"Explore AI insights",description:"Get AI-powered reports for anomalies, performance, and troubleshooting",route:"insights",segments:a,permission:"insights:CreateReport",spotlight:{steps:[{selector:'[data-testid="ai-credits-link"]',title:"AI Credits",description:"Track your remaining AI credits here. Credits are consumed when generating reports.",position:"bottom"},{selector:'[data-testid="insights-overview-container"]',title:"AI-Powered Reports",description:"Choose from different report types: Anomaly Analysis, Capacity Planning, Infrastructure Summary, and Performance Optimization.",position:"left"},{selector:'[data-testid="insights-overview-report-item-card-container"]',title:"Generate a Report",description:"Click any card to generate an AI-powered report. Each report type analyzes your infrastructure from a different perspective.",position:"bottom"},{selector:'[data-testid="insights-sidebar-container"]',title:"Report History",description:"Your generated reports appear here. Filter, search, and access past reports anytime.",position:"right"}]}},{id:"setup-notifications",title:"Connect notification channels",description:"Get alerted via Slack, PagerDuty, Discord, and more",route:"settings/notifications#notificationsActiveTab=0",isSpaceRoute:!0,segments:a,maxChannels:2,permission:"channel:Manage",channelAwareContent:{title:"Add more notification channels",description:"Expand your alerting with additional integrations"}},{id:"create-silencing",title:"Create silencing rules",description:"Reduce alert noise during maintenance windows",route:"settings/notifications#notificationsActiveTab=1",isSpaceRoute:!0,segments:a,permission:"space:CreateSystemSilencingRule"}],[o.firstNotificationSent]:[],[o.secondNodeConnected]:[{id:"use-node-filter",title:"Filter by node",description:"Use the global node filter to focus on specific nodes",route:"overview",segments:a,spotlight:{targetId:"node-filter",title:"Node Filter",description:"Click here to filter your view to specific nodes in your infrastructure",position:"bottom"}}],[o.threeOrMoreNodes]:[{id:"config-manager",title:"Configure collectors",description:"Use Config Manager to set up data collection",route:"settings/configurations",isSpaceRoute:!0,segments:a,permission:"agent:EditDynCfg"},{id:"snmp-monitoring",title:"Monitor SNMP devices",description:"Add network device monitoring",openIntegrations:!0,integrationsSearchTerm:"SNMP",segments:a},{id:"discover-node-groups",title:"Organize nodes with Groups",description:"Create custom tabs to filter and organize your nodes",route:"nodes",segments:a,isFeatureAnnouncement:!0,permission:"space:UpdateSettings",spotlight:{steps:[{targetId:"groups-menu-button",title:"Node Groups",description:"Create custom groups to organize your nodes by environment, location, or any criteria.",position:"bottom"},{selector:'[data-testid="search-filter-input"]',title:"Filter Nodes",description:"Filter nodes by status, version, labels, and more. 
These filters will be preselected when creating a group.",position:"bottom"},{selector:'[data-testid="netdata-table-action-createGroup-bulk"]',title:"Create Group",description:"Create a new group with your current filters preselected, making it easy to organize similar nodes.",position:"bottom"},{selector:'[data-testid="netdata-table-action-addToGroup"]',title:"Add to Group",description:"Add this specific node to an existing group or create a new group with it.",position:"left"},{selector:'[data-testid="tableGroupByFilterControl"]',title:"Group by Groups",description:"Organize your table by grouping nodes based on the groups you've created. See all nodes in each group at a glance.",position:"bottom"},{selector:'[data-testid="globalFilter-nodes"]',title:"Organize with Groups",description:"Creating groups is a great way to filter nodes in your infrastructure. Use them to group nodes by service, environment, or location and easily monitor individual services.",position:"bottom"}]}}],[o.firstTeamMemberInvited]:[{id:"manage-roles",title:"Set up roles",description:"Configure team permissions and access levels",route:"settings/users#userManagementActiveTab=0",isSpaceRoute:!0,segments:[i.largeOrg],permission:"user:ChangeRoles",spotlight:{selector:'[data-testid="netdata-table-action-userSettings"]',title:"Manage User Role",description:"Click here to change a team member's role and adjust their permissions within the space.",position:"left"}}],[o.firstDashboardCreated]:[{id:"dashboard-controls",title:"Master custom dashboards",description:"Learn how to add charts, text, and customize your custom dashboard",openFirstDashboard:!0,segments:a,permission:"dashboard:Update",spotlight:{steps:[{selector:'[data-testid="dashboardHeaderActionBar-addChart-button"]',title:"Add Charts",description:"Click here to add charts from your infrastructure to this dashboard.",position:"bottom"},{selector:'[data-testid="dashboardHeaderActionBar-addText-button"]',title:"Add Text Cards",description:"Add markdown text cards for documentation, notes, or section headers.",position:"bottom"},{selector:'[data-testid="dashboardHeaderActionBar-save-button"]',title:"Save Dashboard",description:"Save your dashboard changes. 
The button is enabled when you have unsaved modifications.",position:"bottom"},{selector:'[data-testid="tv-button"]',title:"TV Mode",description:"Generate a URL to display this dashboard on a TV or large screen for team visibility.",position:"bottom"}]}}]};o.welcome,o.firstNodeConnected,o.firstAlertTriggered,o.firstNotificationSent,o.secondNodeConnected,o.threeOrMoreNodes,o.firstTeamMemberInvited,o.firstDashboardCreated},56523(e,n,t){"use strict";t.d(n,{A:()=>b});var o=t(64467),i=(t(98992),t(54520),t(3949),t(8872),t(37550),t(62953),t(96540)),a=t(34843),s=t(23565),r=t(49163),A=t(63928),c=t(19186),l=t(31246),d=t(97394);function u(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function h(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?u(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):u(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const m="onboarding",g="home",p="/state",f={quickWins:{},dismissedBanners:{},milestones:{completed:{}}},y={"explore-metrics":"explore-metrics","review-alerts":"review-alerts","invite-team":"invite-team","connect-nodes":"connect-nodes"},b=()=>{var e;const n=(0,c.ID)(),{id:t}=(0,c.pr)(),o=n||t,u=(0,s.Hn)({roomId:o}),b=null===(e=(0,r._)({roomId:o,params:{type:m,entity:g,path:p}})[0])||void 0===e?void 0:e.id,E=(0,s.ti)(b),[w,B]=(0,a.fp)(d.E_),C=(0,i.useRef)(!1);C.current=w[o];const M=(0,i.useMemo)((()=>{const e=(null===E||void 0===E?void 0:E.value)||{},n=e.quickWins||{},t=(e.milestones||{}).completed||{},o=t.firstNodeConnected;let i=t;if(Object.keys(n).some((e=>n[e]))&&!(null!==o&&void 0!==o&&o.completedAt)){const e=(e=>{const n={};return Object.entries(e).forEach((e=>{let[t,o]=e;const i=y[t];i&&o&&(n[i]=Date.now())})),n})(n);i=h(h({},t),{},{firstNodeConnected:{completedAt:Date.now(),seenAt:Date.now(),tasks:e}})}return h(h(h({},f),e),{},{milestones:h(h({},f.milestones),{},{completed:h(h({},f.milestones.completed),i)})})}),[null===E||void 0===E?void 0:E.value]),T=(0,i.useRef)(M);T.current=M;const I=(0,A.yK)({roomId:o,silent:!0,onResolve:()=>B((e=>h(h({},e),{},{[o]:!1})))}),v=(0,A.xS)(b,{roomId:o,silent:!0,onResolve:()=>B((e=>h(h({},e),{},{[o]:!1})))}),_=(0,A.z2)(b,{roomId:o,silent:!0,onResolve:()=>B((e=>h(h({},e),{},{[o]:!1})))}),Q=(0,i.useCallback)((()=>{C.current||b&&(B((e=>h(h({},e),{},{[o]:!0}))),_())}),[b,_,o,B]),D=(0,i.useCallback)((e=>{if(!u)return;const n=h(h({},T.current),e),t={type:m,entity:g,path:p,value:n};T.current=n,C.current||(B((e=>h(h({},e),{},{[o]:!0}))),b?v(h({id:b},t)):I(t))}),[b,u,I,v,o,B]),x=(0,i.useCallback)((e=>{const n=T.current;D({quickWins:h(h({},n.quickWins),{},{[e]:!0})})}),[D]),k=(0,i.useCallback)((e=>{const n=T.current;D({dismissedBanners:h(h({},n.dismissedBanners),{},{[e]:!0})})}),[D]),R=(0,i.useCallback)((e=>{const n=T.current,t=n.milestones.completed[e];null!==t&&void 0!==t&&t.completedAt||D({milestones:h(h({},n.milestones),{},{completed:h(h({},n.milestones.completed),{},{[e]:{completedAt:Date.now(),seenAt:null,tasks:{}}})})})}),[D]),S=(0,i.useCallback)((e=>{const n=T.current,t=e.reduce(((e,t)=>{const o=n.milestones.completed[t];return null!==o&&void 
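/* Bulk milestone recorder: skips entries that already carry a completedAt timestamp. */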
0!==o&&o.completedAt||(e[t]={completedAt:Date.now(),seenAt:null,tasks:{}}),e}),{});Object.keys(t).length&&D({milestones:h(h({},n.milestones),{},{completed:h(h({},n.milestones.completed),t)})})}),[D]),P=(0,i.useCallback)((e=>{const n=T.current,t=n.milestones.completed[e];t&&!t.seenAt&&D({milestones:h(h({},n.milestones),{},{completed:h(h({},n.milestones.completed),{},{[e]:h(h({},t),{},{seenAt:Date.now()})})})})}),[D]),F=(0,i.useCallback)((e=>{const n=T.current,t=Date.now(),o=e.reduce(((e,o)=>{const i=n.milestones.completed[o];return!i||i.seenAt||(e[o]=h(h({},i),{},{seenAt:t})),e}),{});Object.keys(o).length&&D({milestones:h(h({},n.milestones),{},{completed:h(h({},n.milestones.completed),o)})})}),[D]),Y=(0,i.useCallback)(((e,n)=>{var t;const o=T.current,i=o.milestones.completed[e];i&&(null!==(t=i.tasks)&&void 0!==t&&t[n]||D({milestones:h(h({},o.milestones),{},{completed:h(h({},o.milestones.completed),{},{[e]:h(h({},i),{},{tasks:h(h({},i.tasks),{},{[n]:Date.now()})})})})}))}),[D]),U=(0,i.useCallback)((()=>{const e=T.current,n=Date.now(),t=Object.entries(e.milestones.completed).reduce(((e,t)=>{let[o,i]=t;return e[o]=i.seenAt?i:h(h({},i),{},{seenAt:n}),e}),{});D({milestones:h(h({},e.milestones),{},{completed:t})})}),[D]);return{settingsLoaded:u,quickWins:M.quickWins,dismissedBanners:M.dismissedBanners,milestones:M.milestones,markQuickWinCompleted:x,dismissBanner:k,recordMilestone:R,markMilestoneSeen:P,markTaskCompleted:Y,recordMilestones:S,markMilestonesSeen:F,resetOnboarding:Q,dismissOnboarding:U,defaultMilestones:l.aL}}},82213(e,n,t){"use strict";t.d(n,{A:()=>B});t(98992),t(54520),t(3949);var o=t(64467),i=t(80045),a=t(96540),s=t(42358),r=t(88255),A=t(24609),c=t(19186),l=t(37156),d=t(92318),u=t(1174),h=t(74848);const m=e=>{let{spaceName:n}=e;return(0,h.jsxs)(s.Text,{textAlign:"center",children:["Please ask your administrator to claim more nodes to\xa0",(0,h.jsx)(s.Text,{strong:!0,children:n})," and you will be able to add them to this room"]})},g=()=>(0,h.jsx)(s.Text,{textAlign:"center",children:"To add nodes to this room, you first need to claim them to its space."}),p=e=>{let{spaceName:n}=e;const t="".concat(d.A.assetsBaseURL,"/img/rack.png");return(0,h.jsxs)(s.Flex,{column:!0,padding:[4,8,0],alignItems:"center",gap:4,children:[(0,h.jsx)("img",{src:t,alt:"server-rack",width:"188px",height:"188px"}),(0,h.jsxs)(s.H4,{textAlign:"center",children:["No claimed nodes available in this Space: ",n]}),(0,h.jsx)(u.A,{permission:"node:Create",children:e=>e?(0,h.jsx)(g,{}):(0,h.jsx)(m,{spaceName:n})})]})};t(81454),t(8872),t(62953);const f=e=>{let{setNodes:n}=e;const[t,o]=(0,a.useState)(""),[i,s]=(0,a.useState)(!0),[r,A]=(0,a.useState)([]);return(0,a.useEffect)((()=>{const e=r.map((e=>e.id));e.length||!t||i||n((e=>t.split(",").reduce(((e,n)=>e.filter((e=>e.id!==n))),e))),e.length&&e.join()!==t&&(n((n=>{const t=n.map((e=>e.id));return e.reduce(((e,n,o)=>t.includes(n)?e:[...e,r[o]]),n)})),s(!1)),o(e.join())}),[r]),{onRowSelected:A}},y=["nodesLoaded","claimedNodeIds","data","roomId","setNodes","showHeader","onAddNodes","canAddNodes","addNodesGA"];function b(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function E(e){for(var n=1;n<arguments.length;n++){var 
n=1;n<arguments.length;n++){var
t=null!=arguments[n]?arguments[n]:{};n%2?b(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):b(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const w={connectionToCloud:!1},B=e=>{let{nodesLoaded:n,claimedNodeIds:t,data:o,roomId:d,setNodes:u,showHeader:m,onAddNodes:g,canAddNodes:b,addNodesGA:B}=e,C=(0,i.A)(e,y);const{onRowSelected:M}=f({setNodes:u}),T=(0,A.ap)(),I=(0,c.wz)(d),v=(0,a.useMemo)((()=>({addEntry:{handleAction:g,tooltipText:"Add the selected nodes to the room",isDisabled:!b,disabledTooltipText:"Select some nodes to add to the room","data-ga":B}})),[g,b]);return(0,h.jsxs)(s.Flex,E(E({column:!0,gap:3,padding:[2,0,0],flex:"1",width:"100%"},C),{},{children:[m&&(0,h.jsx)(s.Flex,{margin:[0,0,5],children:(0,h.jsxs)(s.H3,{children:["Available Nodes (",t.length,")"]})}),m&&(0,h.jsx)(s.Flex,{children:(0,h.jsxs)(s.Text,{color:"textDescription",children:["Nodes in ",T.name," that can be added to ",I.name]})}),t.length>0?(0,h.jsx)(r.A,{"data-testid":"nodesTable-layout",overflow:"hidden",height:"100%",children:(0,h.jsx)(l.A,{flavour:"availableNodes",enableSelection:!0,customNodes:o,showDefaultRowActions:!1,showDefaultBulkActions:!1,customBulkActions:v,onRowSelected:M,columnVisibility:w,alwaysEnableNodeSelection:!0})}):n?(0,h.jsx)(p,{spaceName:T.name}):(0,h.jsx)(s.Text,{color:"menuItem",children:"Loading nodes..."})]}))}},89590(e,n,t){"use strict";t.d(n,{A:()=>f});var o=t(64467),i=(t(98992),t(54520),t(3949),t(62953),t(96540)),a=t(42358),s=t(19186),r=t(99728),A=t(27587),c=t(58218),l=t(82213),d=t(10444),u=t(25962),h=t(60908),m=t(74848);function g(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function p(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?g(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):g(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const f=e=>{let{onClose:n,nodeType:t,showRoomSelector:o}=e;const g=(0,d.J)(),f=(0,r.JT)("node:Create"),y=(0,s.XA)(),b=(0,s.pr)(),E=null!==y&&void 0!==y&&y.loaded?y:b,w=null===E||void 0===E?void 0:E.id,[B,C]=(0,i.useState)((()=>w?[w]:[])),[M,{onAddNodes:T,selectedNodes:I}]=(0,u.A)(w,{addNodesCallback:n});return(0,m.jsx)(h.Ay,{feature:"AddNodes",children:(0,m.jsx)(a.Modal,{"data-testid":"addRoomModal",backdropProps:{backdropBlur:!0},onEsc:n,onClickOutside:n,children:(0,m.jsxs)(a.ModalContent,{"data-testid":"addRoomModal-content",width:{min:100,base:g?"90vw":"55vw"},children:[(0,m.jsxs)(a.ModalHeader,{"data-testid":"addRoomModal-header",children:[(0,m.jsxs)(a.H3,{children:["Add nodes to ",null===E||void 0===E?void 0:E.name]}),(0,m.jsx)(a.ModalCloseButton,{onClose:n,testId:"close-button"})]}),(0,m.jsxs)(a.ModalBody,{height:"600px",overflow:{vertical:"auto"},children:[f&&(0,m.jsxs)(a.Flex,{column:!0,gap:8,children:[o?(0,m.jsx)(A.A,{placeholder:"Select 
Rooms",selectedValue:B,onChange:C}):null,(0,m.jsx)(c.A,{nodeType:t,rooms:B,detailsOpen:!1})]}),!E.untouchable&&(0,m.jsx)(l.A,p(p({"data-testid":"addRoomModal-availableNodes",showHeader:!0,roomId:w},M),{},{onAddNodes:T,canAddNodes:!!I.length,addNodesGA:"add-war-room-modal::click-add-node::global-view"}))]})]})})})}},58218(e,n,t){"use strict";t.d(n,{A:()=>C});var o=t(64467),i=(t(98992),t(54520),t(3949),t(81454),t(62953),t(96540)),a=t(63950),s=t.n(a),r=t(42358),A=t(61661),c=t(29147),l=t(19844),d=t(74848);const u=e=>{let{label:n,iconName:t="code"}=e;return n?(0,d.jsxs)(d.Fragment,{children:[(0,d.jsx)(r.Icon,{name:t,size:"small"}),(0,d.jsx)(r.TextSmall,{strong:!0,textTransform:"uppercase",children:n})]}):null},h=(0,i.memo)(u);var m=t(51510);(0,m.default)(r.Box).attrs({border:{side:"all",color:"border"},padding:[1.75,7,1.75,3],round:!0}).withConfig({displayName:"styled__InfoBlock",componentId:"sc-1lice8m-0"})(["color:",";font-family:monospace;font-size:14px;letter-spacing:0.09px;line-height:18px;max-height:34px;overflow:hidden;text-overflow:ellipsis;white-space:nowrap;width:100%;"],(0,r.getColor)("textLite"));const g=e=>{let{children:n}=e;return(0,d.jsx)(r.Flex,{column:!0,gap:4,children:n})},p=(0,m.default)(r.Button).withConfig({displayName:"styled__StyledButton",componentId:"sc-1lice8m-1"})(["&&{width:auto;min-width:auto;height:22px;background:",";border:1px solid ",";border-radius:4px;padding:0;&:hover{background:",";& > span{span{color:",";}svg{fill:",";}}}& > span{display:flex;align-items:center;gap:6px;margin:2px 8px 2px 4px;span{color:",";}svg{fill:",";}}}"],(e=>{let{active:n,theme:t}=e;return(0,r.getColor)(n?"primary":"modalBackground")({theme:t})}),(0,r.getColor)("primary"),(0,r.getColor)("primary"),(0,r.getColor)("modalBackground"),(0,r.getColor)("modalBackground"),(e=>{let{active:n,theme:t}=e;return(0,r.getColor)(n?"modalBackground":"primary")({theme:t})}),(e=>{let{active:n,theme:t}=e;return(0,r.getColor)(n?"modalBackground":"primary")({theme:t})}));var f=t(29645),y=t(6304);const b=e=>{let{onClose:n,children:t}=e;const o=(0,i.useRef)(),[a,s,,A]=(0,y.A)(),[,l]=(0,f.$)();return(0,i.useEffect)((()=>{l(null)}),[l]),(0,d.jsxs)(d.Fragment,{children:[(0,d.jsxs)(r.Flex,{alignItems:"center",justifyContent:"between",margin:[0,0,2,0],children:[(0,d.jsx)(r.Flex,{children:t}),(0,d.jsxs)(r.Flex,{alignItems:"center",gap:2,children:[(0,d.jsx)(r.Flex,{ref:o,children:(0,d.jsx)(r.Icon,{name:"gear",color:"text",cursor:"pointer",onClick:s})}),n?(0,d.jsx)(r.Icon,{name:"x",color:"text",cursor:"pointer",onClick:n}):null]})]}),o.current&&a?(0,d.jsx)(r.Drop,{width:45,target:o.current,align:{top:"bottom",right:"right"},background:"modalBackground",margin:[2,0,0],round:!0,close:A,onClickOutside:A,onEsc:A,children:(0,d.jsx)(r.Flex,{padding:[3],children:(0,d.jsx)(c.A,{vertical:!0,nested:!0})})}):null]})};t(72577);function E(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const 
w={linux:"deploy-linux-generic",centos:"deploy-linux-generic",macos:"deploy-macos",freebsd:"deploy-freebsd",ubuntu:"deploy-linux-generic",debian:"deploy-linux-generic",cloudlinux:"deploy-linux-generic",Container:"deploy-docker"},B={"deploy-linux-generic":"osLinux","deploy-windows":"windows","deploy-docker":"serviceDockerHub","deploy-kubernetes":"serviceKubernetes","deploy-macos":"osMacOSX","deploy-freebsd":"osFreeBSD",default:"code"},C=e=>{let{integrationId:n,nodeType:t,rooms:a=[],detailsOpen:u=!0,startLoading:m,ContentContainer:f=i.Fragment,onTabChange:y=s(),onClose:C}=e;const M=(0,A.FF)(),T=(()=>{const e=(0,A.AR)();return n=>e.find((e=>e.id==n))})(),I=T(n),[v,_]=(0,i.useState)(function(){let e=arguments.length>1?arguments[1]:void 0;const n=(arguments.length>0&&void 0!==arguments[0]?arguments[0]:[]).findIndex((n=>{let{id:t}=n;return t===w[e]}));return n>=0?n:0}(M,t)),Q=(0,i.useCallback)((e=>{_(e),y(M[e])}),[M,_,y]);return(0,d.jsx)(r.Flex,{"data-testid":"quickstart",column:!0,gap:4,width:"100%",children:I?(0,d.jsxs)(d.Fragment,{children:[(0,d.jsx)(r.Flex,{justifyContent:"end",children:(0,d.jsx)(c.A,{})}),(0,d.jsx)(l.A,{integration:I,rooms:a,navigateToSettings:!0})]}):(0,d.jsx)(r.Tabs,{selected:v,onChange:Q,TabsHeader:e=>(0,d.jsx)(b,function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?E(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):E(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({onClose:C},e)),TabContent:g,tabsProps:{gap:2},children:M.map((e=>(0,d.jsx)(r.Tab,{as:p,label:(0,d.jsx)(h,{label:e.name,iconName:B[e.id]||B.default}),children:(0,d.jsx)(f,{children:(0,d.jsx)(l.A,{integration:e,rooms:a,detailsOpen:u,navigateToSettings:!0,startLoading:m})})},e.name)))})})}},29645(e,n,t){"use strict";t.d(n,{a:()=>c,$:()=>l});var o=t(52035),i=t(34843),a=t(24609);const s=(0,o.eU)(null);var r=t(52419);const A=(0,o.eU)((e=>{const n=e(s);if(n)return n;const t=e(a.EG),o=e((0,a.U2)({id:t,key:"plan"}));return(0,r.M)(o)}),((e,n,t)=>n(s,t))),c=()=>(0,i.md)(A),l=()=>(0,i.fp)(A)},85005(e,n,t){"use strict";t.d(n,{n:()=>h});t(62953);var o=t(96540),i=t(41344),a=t(42358),s=t(92368),r=t(24609),A=t(19186),c=t(29263),l=t(32788),d=t(3319),u=t(74848);const h=e=>{let{onClose:n,onDone:t,isSubmodal:h=!0}=e;const m=(0,r.vt)(),[g,p]=(0,o.useState)(""),[f,y,b]=(0,a.useInputValue)({maxChars:255,value:""}),E=(e=>{const n=(0,i.Zp)(),t=(0,r.bq)(),{sendLog:a}=(0,d.A)();return(0,o.useMemo)((()=>o=>{let{slug:s,id:r}=o;a({feature:"CreatedRoom",isSuccess:!0,roomId:r});const A=(0,i.tW)("/spaces/:spaceSlug/rooms/:roomSlug",{spaceSlug:t,roomSlug:s});e(),n(A)}),[t,a])})(n),w=(0,A.NG)(m,{onSuccess:t||E}),B=(0,o.useCallback)((()=>{w({name:g,description:f})}),[g,f]);return(0,u.jsxs)(l.GO,{onClose:n,children:[(0,u.jsx)(c.z,{onClose:n,isSubmodal:h,title:"Create a new room",children:(0,u.jsx)(a.Button,{label:"Add",onClick:B,disabled:!g})}),(0,u.jsx)(l.Yv,{children:(0,u.jsx)(s.U,{roomName:g,setRoomName:p,roomDescription:f,charsDescIndicator:b,setRoomDescription:y,isCreateForm:!0})})]})}},8363(e,n,t){"use strict";t.d(n,{Ay:()=>u,TU:()=>l,kI:()=>c,rj:()=>d});t(62953);var o=t(42358),i=t(24609),a=t(6304),s=t(19186),r=t(99728),A=t(74848);const c=e=>{const n=1===e;return"Delete ".concat(e," ").concat(n?"room":"rooms")},l=e=>"Delete ".concat(e," room"),d=e=>{let{name:n,roomsLength:t,spaceName:o}=e;const i=n||"".concat(t,1===t?" 
room":" rooms");return(0,A.jsxs)(A.Fragment,{children:["You are about to delete ",(0,A.jsx)("strong",{children:i})," from ",(0,A.jsx)("strong",{children:o})," space.",(0,A.jsx)("br",{}),"This cannot be undone. Are you sure you want to continue?"]})},u=e=>{let{id:n,name:t,navigateToParent:c}=e;const u=(0,i.ap)("name"),h=(0,s.wz)(n,"untouchable"),m=(0,r.JT)("room:Delete")&&!h,g=(0,s.HX)(n,{onSuccess:c}),[p,,f,y]=(0,a.A)();return m?(0,A.jsxs)(A.Fragment,{children:[(0,A.jsx)(o.Button,{"data-testid":"manageRoom-delete",flavour:"hollow",onClick:f,label:"Delete room",danger:!0}),p&&(0,A.jsx)(o.ConfirmationDialog,{confirmLabel:"Yes, delete","data-ga":"delete-war-room-dialog","data-testid":"deleteWarRoomDialog",handleConfirm:g,handleDecline:y,message:(0,A.jsx)(d,{name:t,spaceName:u}),title:l(t)})]}):null}},92368(e,n,t){"use strict";t.d(n,{U:()=>P});var o=t(64467),i=t(80045),a=(t(89463),t(84864),t(27495),t(98992),t(54520),t(3949),t(62953),t(96540)),s=t(42358),r=t(16866),A=t(49916),c=t(19186),l=t(99728),d=t(82505),u=t(6304),h=t(74848);function m(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function g(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?m(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):m(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const p="leave-war-room-dialog",f="leaveWarRoomDialog",y=e=>{let{id:n,name:t,navigateToParent:o}=e;const i=(0,d.ES)(n,"ids"),a=(0,c.NQ)(n,{onSuccess:o}),r=(0,c.wz)(n,"untouchable"),[A,,l,m]=(0,u.A)(),y=1===i.length&&!r?{"data-ga":"".concat(p,"-last-member"),"data-testid":"".concat(f,"LastMember"),message:(0,h.jsxs)(h.Fragment,{children:["If you leave, ",(0,h.jsx)("strong",{children:t})," room will be deleted immediately.",(0,h.jsx)("br",{}),"Are you sure you want to continue?"]}),title:"Leave and delete ".concat(t," room")}:{"data-ga":p,"data-testid":f,message:(0,h.jsxs)(h.Fragment,{children:["You are about to leave ",(0,h.jsx)("strong",{children:t})," room.",(0,h.jsx)("br",{}),"Are you sure you want to continue?"]}),title:"Leave ".concat(t," room")};return(0,h.jsxs)(h.Fragment,{children:[(0,h.jsx)(s.Button,{"data-ga":"manage-war-room-tab::click-leave-war-room::manage-war-room-modal","data-testid":"manageRoom-leave",icon:"switch_off",flavour:"borderless",label:"Leave room",neutral:!0,padding:[0],width:"fit-content",onClick:l}),A&&(0,h.jsx)(s.ConfirmationDialog,g({confirmLabel:"Yes, leave",handleConfirm:a,handleDecline:m},y))]})};var b=t(8363),E=t(89841);const w=["charsIndicator","isDisabled","isValid","handleChange","setIsValid","setValidationMessage","validationMessage","value"];function B(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const C=(0,E.k)([e=>{const n=e.length>=1;return(0,E.H)(n,"Give your room a name that's at least one character.")},e=>{const n=e.length<=20;return(0,E.H)(n,"A room's name can't exceed 20 
characters.")}]),M=e=>{let{charsIndicator:n,isDisabled:t,isValid:r,handleChange:A,setIsValid:c,setValidationMessage:l,validationMessage:d,value:u}=e,m=(0,i.A)(e,w);const[g,p]=(0,s.useTouchedState)({});return(0,a.useEffect)((()=>{const e=C(u),n=e.isValid,t=e.messages&&e.messages.length?e.messages[0]:void 0;!r&&n?c(!0):r&&!n&&c(!1),t&&l(t)}),[r,c,u,g,l]),(0,h.jsx)(s.TextInput,function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?B(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):B(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({"data-testid":"warRoomOptions-warRoomNameInput",label:"Room name",name:"createRoom",placeholder:"Enter your room's name",hint:"Tip: Use rooms to group your Nodes by their service, purpose, or location.",fieldIndicator:n,value:u,touched:g,onBlur:p,onChange:A,success:r,error:!r&&d,instantFeedback:"all",disabled:t,autoFocus:!0},m))};var T=t(30005);const I=["id"];function v(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function _(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?v(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):v(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const Q=e=>{let{id:n}=e,t=(0,i.A)(e,I);return(0,h.jsxs)(s.Flex,_(_({column:!0,gap:1},t),{},{children:[(0,h.jsx)(s.Text,{strong:!0,children:"Room Id"}),(0,h.jsx)(T.Ay,{confirmationText:"Room ID copied to your clipboard.",children:n})]}))};var D=t(60908);const x=["id","navigateToParent","isCreateForm","roomName","setRoomName","roomDescription","setRoomDescription","charsDescIndicator","onSaveClick"];function k(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function R(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?k(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):k(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const S=new RegExp(/^[\w\s\d]*?$/),P=e=>{let{id:n,navigateToParent:t,isCreateForm:o,roomName:d,setRoomName:u,roomDescription:m,setRoomDescription:g,charsDescIndicator:p,onSaveClick:f}=e,E=(0,i.A)(e,x);const w=(0,c.wz)(n),B=(0,A.DL)(),[C,T]=(0,a.useState)(!1),[I,v]=(0,a.useState)(""),[_,k]=(0,a.useState)(""),[P]=(0,s.useInputValue)({maxChars:r.ux}),F=(0,a.useCallback)((e=>u(e.target.value)),[u]),Y=!!n,U=((0,l.JT)("room:LeaveAllNodes")||!w.untouchable)&&B.length>1&&w.isMember,[N,j]=(0,s.useTouchedState)({defaultState:!0}),z=!w.name||d===w.name&&m===w.description;return(0,a.useEffect)((()=>{k(S.test(m)&&N?"":"The description can only contain digits, letters, and 
spaces.")}),[N,m]),(0,h.jsx)(D.Ay,{tab:"Room::RoomForm",children:(0,h.jsxs)(s.Flex,R(R({width:{max:150},gap:6,column:!0,"data-testid":"manageRoom",height:"100%"},E),{},{children:[(0,h.jsxs)(s.Flex,{column:!0,"data-testid":"manageRoom-settings",gap:2,children:[(0,h.jsx)(M,{"data-testid":"manageRoom-createRoomInput",value:d,handleChange:F,charsIndicator:P,validationMessage:I,setValidationMessage:v,isValid:C,setIsValid:T,isDisabled:Y&&w.untouchable}),(0,h.jsx)(s.TextInput,{"data-testid":"manageRoom-descriptionInput",label:"Description",name:"roomDescription",placeholder:"A room description...",instantFeedback:"positiveFirst",touched:N,onBlur:j,fieldIndicator:p,error:""!==_&&_,success:""===_,value:m,onChange:g}),Y?(0,h.jsx)(Q,{"data-testid":"manageRoom-roomIdInput",id:n,width:{base:150}}):null]}),(0,h.jsxs)(s.Flex,{"data-testid":"manageRoom-actions",justifyContent:"between",children:[(0,h.jsxs)(s.Flex,{"data-testid":"manageRoom-deleteLeaveActions",gap:4,children:[Y&&U&&(0,h.jsx)(y,{id:n,name:w.name,navigateToParent:t}),Y&&(0,h.jsx)(b.Ay,{id:n,name:d,navigateToParent:t})]}),!o&&(0,h.jsx)(s.Button,{"data-ga":"manage-war-room-tab::click-save::manage-war-room-modal","data-testid":"manageRoom-saveButton",label:"Save",onClick:f,disabled:z})]})]}))})}},73467(e,n,t){"use strict";t.d(n,{A:()=>c});t(98992),t(54520),t(3949);var o=t(64467),i=t(42358),a=t(45087),s=t(74848);function r(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function A(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?r(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):r(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const c=e=>(0,s.jsx)(a.A,{content:"Room you're a member of. 
You're able to receive notifications related to nodes in this room",children:(0,s.jsx)(i.Icon,A({name:"checkmark",width:"12px",height:"12px","data-testid":"svg"},e))})},2863(e,n,t){"use strict";t.d(n,{S:()=>g,z:()=>p});var o=t(64467),i=(t(98992),t(54520),t(3949),t(62953),t(52035)),a=t(34843),s=t(18790),r=t(2404),A=t.n(r),c=t(24609);const l={loaded:!1,entries:[],error:null,updatedAt:""},d=(0,s.I)((()=>(0,i.eU)(l)));var u=t(57377);function h(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function m(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?h(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):h(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const g=(0,s.I)((e=>{let{id:n,key:t}=e;return(0,i.eU)((e=>e(d(n))[t]),((e,o,i)=>{o(d(n),"error"!==t?e=>({loaded:!0,entries:A()(i,e.entries)?e.entries:i,updatedAt:(new Date).toISOString()}):m(m({},l),{},{error:i}))}))}),u.Ay),p=e=>{const n=(0,c.vt)();return(0,a.md)(g({id:n,key:e}))}},84308(e,n,t){"use strict";t.d(n,{X:()=>a,_:()=>i});var o=t(91130);const i=e=>o.A.get("/api/v1/spaces/".concat(e,"/token"),{allow401:!0}),a=e=>o.A.post("/api/v1/spaces/".concat(e,"/token/rotate"),void 0,{allow401:!0})},46391(e,n,t){"use strict";t.d(n,{A:()=>i});var o=t(52035);const i=(0,t(18790).I)((()=>(0,o.eU)(null)))},55429(e,n,t){"use strict";t.d(n,{A:()=>c});var o=t(96540),i=t(34843),a=t(30569),s=t(99728),r=t(84308),A=t(46391);const c=e=>{const n=(0,i.md)((0,A.A)(e)),t=(0,s.JT)("node:Create",e),c=(0,a.yF)((0,o.useCallback)((async(e,n,t)=>{if(!await e((0,A.A)(t))){const{data:e}=await(0,r._)(t);n((0,A.A)(t),e)}}),[]));return(0,o.useEffect)((()=>{t&&e&&c(e)}),[t,e,c]),n}},60266(e,n,t){"use strict";t.d(n,{A:()=>A});t(62953);var o=t(96540),i=t(24609),a=t(10853);var s=t(6304);const r=e=>{const n=new Date(e||void 0).toDateString();return"Invalid Date"!==n?n:null},A=()=>{var e;const n=(0,i.ap)(),t=(0,o.useMemo)((()=>"".concat("dismissedBumpedWarningKey","_").concat(null===n||void 0===n?void 0:n.id)),[null===n||void 0===n?void 0:n.id]),[A,,,c]=(0,s.A)(!localStorage.getItem(t)),{trialEndsAtRaw:l}=(0,a.A)(),d=l&&"EarlybirdAndCommunitySunset"===(null===n||void 0===n||null===(e=n.metadata)||void 0===e?void 0:e.joinTrialCode),u=A&&l&&d,h=d?"EARLYB25":null,m=(0,o.useCallback)((()=>{localStorage.setItem(t,!0),c()}),[c,t]);return{isModalVisible:u,isEarlybirdAndCommunitySunset:d,coupon:h,trialEndsAt:r(l),onClose:m}}},68399(e,n,t){"use strict";t.d(n,{$B:()=>r,TB:()=>A,W1:()=>i,ml:()=>a,ue:()=>o,w1:()=>s});const o={default:"successSemi",warning:"warningSemi",critical:"errorSemi"},i={default:{background:"successSemi",border:"success"},warning:{background:"warningSemi",border:"warning"},critical:{background:"errorSemi",border:"error"}},a=[10,3,2,1],s=12,r="dismissedTrialWelcome",A="dismissedTrialWarningDate"},54677(e,n,t){"use strict";t.d(n,{A:()=>B});t(98992),t(54520),t(3949);var o=t(64467),i=t(80045),a=t(96540),s=t(42358),r=t(63950),A=t.n(r),c=t(10853),l=t(10602),d=t(81391),u=t(24013);const h=()=>{const e=(0,d.n)("id"),n=(0,l.gr)(e,"loaded"),t=(0,l.gr)(e,"ids");return{loaded:n,windowsNodes:((0,u.Gt)(t)||[]).filter((e=>{var n;let{os:t}=e;return"windows"===(null===t||void 0===t||null===(n=t.id)||void 0===n?void 
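/* Windows-trial helper: keeps only nodes whose os.id is "windows" (case-insensitive). */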
0:n.toLowerCase())}))}};var m=t(60266),g=t(99243),p=t(74848);const f=["canUpgrade","onUpdateClick","children"];function y(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function b(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?y(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):y(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const E={banner:{color:"main"},sidebar:{lineHeight:"1.6",color:"main"},freePlanUpgrade:{lineHeight:"1.6",color:"main"},billing:{color:"textLite"}},w=e=>{let{canUpgrade:n,onUpdateClick:t=A(),children:o}=e,a=(0,i.A)(e,f);return n?(0,p.jsx)(s.Box,b(b({"data-testid":"upgrade-to-business-banner",onClick:t,as:s.Text,cursor:"pointer",textDecoration:"underline",color:"main"},a),{},{children:o})):null},B=e=>{let{flavour:n,couponRemainingDays:t,onUpdateClick:o=A(),inBanner:i}=e;const{daysRemaining:r,canUpgrade:l,trialEndsAt:d}=(0,c.A)(),{isEarlybirdAndCommunitySunset:u}=(0,m.A)(),{loaded:f,windowsNodes:y}=h(),B=(0,a.useMemo)((()=>({isCoupon:t>0,isEarlybirdAndCommunitySunset:u,isBanner:"banner"===n,isSidebar:"sidebar"===n,isBilling:"billing"===n,isFreePlanUpgrade:"freePlanUpgrade"===n})),[n,t]);return(0,p.jsx)(s.Flex,{justifyContent:B.isBilling?"start":"center",alignItems:"center",width:"100%",gap:2,children:B.isCoupon?(0,p.jsxs)(s.Flex,{column:!0,alignItems:i?"center":"start",children:[(0,p.jsx)(s.Text,b(b({},E[n]),{},{fontSize:"10px",lineHeight:"14px",strong:!0,children:t>2?"Discount auto-applies at checkout.":"Upgrade now. 
Offer auto-applies."})),t>2?(0,p.jsxs)(s.Flex,{column:!0,alignItems:i?"center":"start",children:[(0,p.jsxs)(s.Flex,{column:!i,gap:i?1:0,alignItems:i?"center":"start",children:[(0,p.jsxs)(s.Text,b(b({},E[n]),{},{fontSize:i?"24px":"36px",lineHeight:i?"24px":"40px",strong:!0,children:["\ud83e\ude84"," ",(0,p.jsx)(g.Te,b(b({},E[n]),{},{fontSize:i?"24px":"36px",lineHeight:i?"24px":"38px",strong:!0,children:"10% Off"}))]})),(0,p.jsx)(g.Te,b(b({},E[n]),{},{fontSize:"24px",lineHeight:i?"24px":"26px",strong:!0,children:"All Annual Plans"}))]}),(0,p.jsx)(g.Te,b(b({},E[n]),{},{fontSize:i?"14px":"20px",lineHeight:i?"18px":"22px",strong:!0,children:"Black Friday Week!"}))]}):(0,p.jsxs)(s.Flex,{column:!0,alignItems:i?"center":"start",children:[(0,p.jsxs)(s.Text,b(b({},E[n]),{},{fontSize:i?"14px":"30px",lineHeight:i?"18px":"36px",strong:!0,children:["\u231b"," ",(0,p.jsx)(g.Te,b(b({},E[n]),{},{fontSize:i?"14px":"30px",lineHeight:i?"18px":"32px",strong:!0,letterSpacing:i?"normal":"1px",children:"Ends soon"}))]})),(0,p.jsxs)(s.Flex,{column:!i,gap:i?1:0,alignItems:i?"center":"start",children:[(0,p.jsx)(g.Te,b(b({},E[n]),{},{fontSize:i?"24px":"46px",lineHeight:i?"24px":"48px",strong:!0,letterSpacing:i?"normal":"2.4px",children:"10% Off"})),(0,p.jsx)(g.Te,b(b({},E[n]),{},{fontSize:"24px",lineHeight:i?"24px":"26px",strong:!0,letterSpacing:i?"normal":"0.5px",children:"All Annual Plans"}))]})]})]}):B.isEarlybirdAndCommunitySunset?(0,p.jsxs)(s.Flex,{column:!0,gap:1,alignItems:"center",children:[(0,p.jsxs)(s.Text,b(b(b({},E[n]),{},{textAlign:"center",strong:!0},B.isBanner?{}:{fontSize:"10px"}),{},{children:["Thank you for your support!"," ",B.isBanner?(0,p.jsx)(w,{canUpgrade:l,onUpdateClick:o,children:"Upgrade"}):null]})),(0,p.jsx)(s.Text,b(b({},E[n]),{},{color:"primary",fontSize:"22px",lineHeight:.8,strong:!0,children:"25% Lifetime off"}))]}):B.isFreePlanUpgrade?(0,p.jsx)(s.Text,b(b({},E[n]),{},{children:"Upgrade your plan for unlimited access and Business features."})):(0,p.jsxs)(s.Flex,{column:!0,gap:1,justifyContent:"center",children:[(0,p.jsxs)(s.Text,b(b({},E[n]),{},{children:["You have ",(0,p.jsx)(s.Text,b(b({strong:!0},E[n]),{},{children:"".concat(r," days")}))," ","left to explore all the features of Netdata Business."," ",B.isBilling&&(0,p.jsxs)(p.Fragment,{children:["Trial ends at"," ",(0,p.jsx)(s.Text,b(b({strong:!0},E[n]),{},{children:d})),"."," "]}),B.isBanner?(0,p.jsx)(w,{canUpgrade:l,onUpdateClick:o,children:"Consider upgrading for unlimited access."}):(0,p.jsx)(p.Fragment,{children:"Consider upgrading for unlimited access."})]})),f&&y.length?(0,p.jsxs)(s.Flex,{alignItems:"center",justifyContent:"center",gap:1,children:[(0,p.jsx)(s.Icon,{name:"warning_triangle",color:"warning"}),(0,p.jsxs)(s.Text,b(b({},E[n]),{},{children:["Your"," ",(0,p.jsx)(s.Text,b(b({strong:!0},E[n]),{},{children:"Windows"}))," ","nodes will be restricted after the trial period ends."]}))]}):null]})})}},99243(e,n,t){"use strict";t.d(n,{PL:()=>c,Te:()=>l,bg:()=>A});t(98992),t(54520),t(3949);var o=t(64467),i=t(51510),a=t(42358),s=t(68399);function r(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const A=(0,i.default)(a.Flex).attrs({position:"relative"}).withConfig({displayName:"styled__TrialWarningSidebar",componentId:"sc-66x250-0"})(["background-color:",";border-width:1px;border-style:dashed;border-color:",";border-radius:2px;"],(e=>{var 
n;let{type:t}=e;return(0,a.getColor)(null===(n=s.W1[t])||void 0===n?void 0:n.background)}),(e=>{var n;let{type:t}=e;return(0,a.getColor)(null===(n=s.W1[t])||void 0===n?void 0:n.border)})),c=(0,i.default)(a.Button).withConfig({displayName:"styled__TrialUpgradeButton",componentId:"sc-66x250-1"})(["flex:auto;",""],(e=>{let{days:n,theme:t}=e;return n>0&&"color: ".concat((0,a.getColor)("bright")({theme:t})," !important;")})),l=(0,i.default)(a.TextBigger).attrs((e=>function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?r(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):r(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({strong:!0,lineHeight:.8,letterSpacing:"normal",whiteSpace:"nowrap"},e))).withConfig({displayName:"styled__PromoText",componentId:"sc-66x250-2"})(["background-color:",";background-image:linear-gradient( 180deg,"," 0%,"," 100% );-webkit-background-clip:text;-webkit-text-fill-color:transparent;letter-spacing:",";"],(0,a.getColor)("bright"),(0,a.getColor)(["blue","aquamarine"]),(0,a.getColor)(["purple","mauve"]),(e=>e.letterSpacing))},2652(e,n,t){"use strict";t.d(n,{A:()=>s});var o=t(96540),i=t(41344),a=t(88325);const s=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";const n=(0,i.Zp)(),{url:t}=(0,a.A)("business");return(0,o.useCallback)((()=>{n(t,{replace:!0,state:{coupon:e}})}),[t,e])}},44913(e,n,t){"use strict";t.r(n),t.d(n,{default:()=>P,useStaticCoupon:()=>k});var o=t(64467),i=t(80045),a=(t(98992),t(54520),t(3949),t(62953),t(96540)),s=t(42358),r=t(42539),A=t(12268),c=t(16798),l=t(45710),d=t(32370),u=t(41394),h=t(64587),m=t(56171),g=t(45087),p=t(6304),f=t(94404),y=t(78152),b=t(10853),E=t(51262),w=t(60266),B=t(68399),C=t(99243),M=t(54677),T=t(2652),I=t(74848);const v=["canUpgrade"];function _(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function Q(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?_(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):_(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const D=y.DO?(0,A.D)(new Date(2025,10,30)):(0,A.D)(new Date(2024,11,31)),x=new Date,k=()=>{const{loaded:e,isNewUser:n}=(0,E.A)(),t=e&&n&&parseFloat((D-x)/1e3/60/60/24);return[t>0?"BF1Y2025":"",t]},R=(S=(0,f.A)(C.PL),e=>{let{canUpgrade:n}=e,t=(0,i.A)(e,v);return n?(0,I.jsx)(S,Q({},t)):(0,I.jsx)(g.A,{content:"You don't have permissions to upgrade the plan",children:(0,I.jsx)(s.Flex,{children:(0,I.jsx)(S,Q({disabled:!0},t))})})});var S;const P=e=>{let{flavour:n="banner",showCoupon:t}=e;const{localeDateString:o}=(0,h.$j)(),{onTrial:i,sidebarWarningVisible:A,bannerVisible:g,dismissBanner:f,type:y,canUpgrade:E,planIsFreeOrEarlyBird:v}=(0,b.A)(),{coupon:_}=(0,w.A)(),[Q,x]=k(),S=_||Q,P=(0,T.A)(S),F=(!!S||!i)&&v,[Y,U]=(0,p.A)();return(0,a.useEffect)((()=>{const 
e=localStorage.getItem("dismissSidebarBanner");e&&(0,d.f)((0,u.H)(e))&&!(0,l.R)((0,u.H)(e))||U(!0)}),[]),"sidebar"===n&&(A||F)?Y?(0,I.jsxs)(C.bg,{type:i?y:"default",column:!0,gap:2,margin:[2],padding:[2],children:[(0,I.jsx)(s.Box,{"data-testid":"close-button",as:s.Icon,color:"text",cursor:"pointer",name:"x",position:"absolute",right:"2px",top:"2px",width:"12px",height:"12px",onClick:()=>{U(),localStorage.setItem("dismissSidebarBanner",(0,r.W)(new Date,{days:2}).toISOString())}}),(0,I.jsx)(M.A,{flavour:i?n:"freePlanUpgrade",couponRemainingDays:x,expDate:D,onUpdateClick:P}),(0,I.jsx)(R,{feature:"UpgradeToBusiness",isStart:!0,small:!0,"data-testid":"upgradeToBusiness-sidebar",label:x>0?"Upgrade & Save":"Upgrade",canUpgrade:E,onClick:P,days:x}),x>0&&(0,d.f)(D)&&(0,I.jsx)(s.TextNano,{strong:!0,textAlign:"center",lineHeight:.8,children:"Expires ".concat(x<3?(0,c.B)(D,{addSuffix:!0}):o(D))})]}):null:t&&x>0||g?(0,I.jsx)(m.A,{testId:"trial-banner",width:"100%",background:t?B.ue.default:B.ue[y],onClose:f,tooltipProps:{align:"top"},zIndex:20,children:(0,I.jsx)(M.A,{flavour:i?n:"freePlanUpgrade",couponRemainingDays:x,expDate:D,onUpdateClick:P,inBanner:!0})}):null}},10853(e,n,t){"use strict";t.d(n,{A:()=>f});t(98992),t(72577),t(62953);var o=t(96540),i=t(46587),a=t(99728),s=t(24285),r=t(86706),A=t(427),c=(t(37550),t(32370)),l=t(73087),d=t(68399);var u=t(18739),h=t(53142),m=t(18387),g=t(19707);const p=(e,n)=>{try{const t=new Date(e||void 0).toLocaleDateString(n);return"Invalid Date"!==t?t:null}catch(t){const n=new Date(e||void 0).toLocaleDateString();return"Invalid Date"!==n?n:null}},f=()=>{const{locale:e}=(0,A.bO)(),n=(0,r.gk)(),t=(0,r.bj)(),{id:f,isAnonymous:y}=(0,i.uW)(),b=(0,o.useMemo)((()=>!!(t||[]).find((e=>e.id===f&&"creator"===e.joinMethod))),[f,t]),E=(0,g.I)(),{loaded:w,value:B,refresh:C}=(0,u.JN)(),{isFailure:M}=(0,h.A)(),{slug:T,trialEndsAt:I,paymentProvider:v}=B||{},_=(0,o.useMemo)((()=>(e=>{if(!e)return null;const n=new Date(e)-new Date;return Math.ceil(n/864e5)})(I)),[I]),Q=w&&!!I&&!E,D=(0,a.JT)("billing:Manage"),[x]=(0,s.ng)("trialModalDismissed"),k=n&&b&&D&&(Q||M)&&!localStorage.getItem(d.$B)&&!x,[R,S]=(0,o.useState)(localStorage.getItem(d.TB)),[P,F]=(0,o.useState)(),Y=(U=_)>8?"default":U>3?"warning":"critical";var U;const N=!y&&D;return(0,o.useEffect)((()=>{const e=((e,n,t,o)=>{if(n<0)return!1;const i=new Date(e||void 0);if(!(0,c.f)(i))return!1;const a=new Date(o||void 0);if(!(0,c.f)(a))return n<=d.w1;const{days:s}=(0,l.F)({start:a,end:i});return t.some((e=>e>=n&&e<s))})(I,_,d.ml,R);F(e)}),[I,_,R]),{trialWelcomeVisible:k,sidebarWarningVisible:Q,bannerVisible:P,dismissBanner:()=>{const e=(new Date).toISOString();S(e),localStorage.setItem(d.TB,e)},daysRemaining:_,trialEndsAt:p(I,e),trialEndsAtRaw:I,type:Y,canUpgrade:N,onTrial:Q,refreshPlan:C,planIsFreeOrEarlyBird:(0,m.Kj)(T),paymentProvider:v}}},18387(e,n,t){"use strict";t.d(n,{Dy:()=>l,JR:()=>p,Kj:()=>r,Lf:()=>h,M7:()=>d,di:()=>u,qN:()=>A,z_:()=>c});var o=t(64467),i=(t(98992),t(54520),t(3949),t(8872),t(62953),t(50979));function a(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function s(e){for(var n=1;n<arguments.length;n++){var 
t=null!=arguments[n]?arguments[n]:{};n%2?a(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):a(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const r=e=>[i.VH.free,i.VH.earlyBird].includes(e),A=e=>{if(!e)return null;const{city:n,country:t,line_1:o,line_2:i,postalCode:a,state:s}=e;return[[o,i].filter(Boolean).join(" "),n,a,s,t].filter(Boolean).join(", ")},c=e=>{let{currentPlan:n,slug:t,version:o,onTrial:i}=e;const{slug:a,version:s,interval:A}=n;return a===t&&s===o?r(t)?"Current plan":i?"Upgrade":"year"===A?"Update plan options":"Change billing frequency":r(a)&&!r(t)||"pro"===a&&"business"===t?"Upgrade":"Select"},l=(e,n)=>r(n)?"hollow":"business"===n||"pro"===n&&r(e)?"default":"hollow",d=(e,n)=>{const t=e.split("."),o=new Date(t[0],t[1]-1),i=n.split(".");return new Date(i[0],i[1]-1)-o},u=function(){let{price:e={},promotionCode:n,commitment:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{id:o,commitment:i}=e,a={productId:o,promotionCode:n};return!o||i&&!t?null:i&&t?s(s({},a),{},{commitment:t}):a},h=function(){let{price:e={},promotionCode:n,commitment:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const{id:o,commitment:i}=e,a={id:o,promoCode:n};return!o||i&&!t?null:i&&t?s(s({},a),{},{commitment:t}):a},m=["Community","Business"],g=e=>{for(let n=0;n<m.length;n++)if(e.startsWith(m[n]))return n;return 0},p=(e,n)=>!(!e||!n)&&g(e)<g(n)},30582(e,n,t){"use strict";t.d(n,{A:()=>y});t(62953);var o=t(96540),i=t(42358),a=t(91370),s=t(16866),r=t(32788),A=t(29263),c=t(47410),l=(t(84864),t(27495),t(99236)),d=t(89841),u=t(74848);const h=new RegExp(/(\u00a9|\u00ae|[\u2000-\u3300]|\ud83c[\ud000-\udfff]|\ud83d[\ud000-\udfff]|\ud83e[\ud000-\udfff]|[!@#$%^&*()_+=\-])/),m=(0,d.k)([e=>{const n=e.length>=l.pz;return(0,d.H)(n,"Please enter a name for the Space that is at least 5 characters.")},e=>{const n=e.length<=l.dy;return(0,d.H)(n,"A Space's name can't exceed 20 characters.")},e=>h.test(e)?{isValid:!1,message:"There's an unexpected character in the Space's name. Use only alphanumeric characters (A-Z, a-z, 0-9) and spaces."}:{isValid:!0}]),g=e=>{let{value:n,isValid:t,setIsValid:a,charsIndicator:s,isDirty:r,handleChange:A,validationMessage:c,setValidationMessage:l,onKeyDown:d}=e;const[h,g]=(0,i.useTouchedState)({});return(0,o.useEffect)((()=>{const e=m(n),o=e.isValid,i=e.messages&&e.messages.length?e.messages[0]:void 0;!t&&o?a(!0):t&&!o&&a(!1),i&&l(i)}),[t,n,h,a,l]),(0,u.jsx)(i.TextInput,{label:"Space name",name:"createWorkspace",placeholder:"Enter your Space's name",hint:"Give your Space a name that's between 5-20 characters. 
This cannot change.",fieldIndicator:s,value:n,touched:h,onBlur:g,onChange:A,success:t,error:!t&&c,instantFeedback:"positiveFirst",isDirty:r,onKeyDown:d,autoFocus:!0})};var p=t(3319),f=t(60908);const y=e=>{let{onClose:n,onDone:t}=e;const[d,h]=(0,o.useState)(!1),[m,y]=(0,o.useState)(!1),[b,E,w,B]=(0,i.useInputValue)({maxChars:l.dy}),[C,M]=(0,o.useState)(""),{sendLog:T,isReady:I}=(0,p.A)(),v=(0,o.useCallback)((e=>{y(!1),n(),null===t||void 0===t||t(e),T({feature:"CreateSpace",isSuccess:!0})}),[n,T,I]),_=(0,o.useCallback)((()=>{y(!1),T({feature:"CreateSpace",isFailure:!0})}),[T,I]),Q=(0,a.A)({onSuccess:v,onError:_}),D=(0,o.useCallback)((()=>{d&&(y(!0),Q({name:b}))}),[b,d]);return(0,u.jsx)(r.GO,{onClose:n,children:(0,u.jsxs)(f.Ay,{feature:"CreateSpace",children:[(0,u.jsx)(A.z,{onClose:n,title:"Create Space",children:(0,u.jsx)(i.Button,{disabled:!d,isLoading:m,loadingLabel:"Creating",label:"Save",onClick:D})}),(0,u.jsx)(c.U,{children:"Create a new Space"}),(0,u.jsx)(r.Yv,{children:(0,u.jsx)(g,{isDirty:B,isValid:d,setIsValid:h,value:b,handleChange:E,charsIndicator:w,validationMessage:C,setValidationMessage:M,onKeyDown:e=>{e.keyCode===s.I7&&d&&D()}})})]})})}},63936(e,n,t){"use strict";t.d(n,{A:()=>p});t(26910),t(98992),t(81454);var o=t(96540),i=t(42358),a=t(51510),s=t(41395),r=t(67935),A=t(24609),c=t(79748),l=t(41344),d=t(99728),u=t(88325),h=t(74848);const m={admin:"Users with this role can control Spaces, Rooms, Nodes, Users and Billing. They can also access any Room in the Space.",member:"Users with this role can create Rooms and invite other Members. They can only see the Rooms they belong to and all Nodes in the All Nodes room.",manager:"Users with this role can manage Rooms and Users. They can access any Room in the Space.",troubleshooter:"Users with this role can use Netdata to troubleshoot, not manage entities. 
They can access any Room in the Space.",observer:"Users with this role can only view data in specific Rooms.",billing:"Users with this role can handle billing options and invoices."},g=(0,a.default)(i.Flex).withConfig({displayName:"rolePicker__PlanBadge",componentId:"sc-szt8jq-0"})(["pointer-events:auto;"]),p=e=>{let{availableRoles:n,dataGA:t,dataTestId:a,onChange:p,value:f}=e;const y=(0,A.ap)("plan"),b=(0,o.useMemo)((()=>(0,r.L_)(y).map((e=>({isChecked:e===f,isEnabled:n.includes(e),role:e}))).sort(((e,n)=>Number(n.isEnabled)-Number(e.isEnabled)))),[n,r.L_,y,f]),E=(0,d.JT)("billing:ReadAll"),{url:w}=(0,u.A)();return(0,h.jsx)(i.Flex,{column:!0,gap:2,"data-testid":"".concat(a,"-roleOptions"),children:b.map((e=>{let{isChecked:n,isEnabled:o,role:r}=e;const A=o?void 0:"medium",d="troubleshooter"===r?"pro":"business";return(0,h.jsx)(i.RadioButton,{checked:n,"data-ga":"".concat(t,"::select-role-").concat(r,"::global-view"),"data-testid":"".concat(a,"-").concat(r,"Option"),disabled:!o,onChange:p,value:r,alignItems:"start",children:(0,h.jsxs)(i.Flex,{column:!0,children:[(0,h.jsxs)(i.Flex,{gap:2,alignItems:"center",children:[(0,h.jsx)(i.Text,{opacity:A,children:(0,s.Zr)(r)}),!o&&(0,h.jsx)(g,{background:"sideBarMini",border:{side:"all",color:"border"},cursor:"initial",padding:[1],round:!0,children:(0,h.jsx)(c.A,{align:"bottom",as:l.N_,color:"text",Component:i.TextMicro,content:"Upgrade your plan in order to use this role","data-ga":"".concat(t,"::click-plan-badge-").concat(d,"::global-view"),disabled:!E,hoverColor:"textFocus",showToolTip:!0,strong:!0,to:w,children:"Upgrade now!"})})]}),(0,h.jsx)(i.TextSmall,{color:"textLite",opacity:A,children:m[r]})]})},r)}))})}},80372(e,n,t){"use strict";t.d(n,{$d:()=>d,A8:()=>r,DT:()=>p,G0:()=>A,IV:()=>h,Wk:()=>g,aj:()=>i,bO:()=>c,bq:()=>f,mm:()=>o,uX:()=>y,ys:()=>m});const o="info",i="rooms",a="nodes",s="users",r="notifications",A="integrations",c="billing",l="integrations",d="configurations",u="authenticationTab",h=[o,i,a,s,r,c,l,d,u],m={[o]:"Info",[i]:"Rooms",[a]:"Nodes",[s]:"User Management",[r]:"Alerts & Notifications",[A]:"Services",[c]:"Plan & Billing",[l]:"Integrations",[d]:"Configurations",[u]:"Authentication"},g=h[0],p={virtual:h[7],default:h[0]},f="/spaces/:spaceSlug/settings/:settingsTab/*",y="/spaces/:spaceSlug/settings/:settingsTab/:settingsSubTab/*"},6818(e,n,t){"use strict";t.d(n,{A:()=>Q,o:()=>C});var o=t(64467),i=t(80045),a=(t(9391),t(98992),t(54520),t(3949),t(62953),t(96540)),s=t(41344),r=t(42358),A=t(61337),c=t(79748),l=t(45087),d=t(64999),u=t(99236),h=t(46587),m=t(6304),g=t(18739),p=t(3319),f=t(4204),y=t(74848);const b=["currentPlan"],E=["id","isLastSpace","name","onClose"];function w(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function B(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?w(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):w(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const C=e=>n=>{let{currentPlan:t}=n,o=(0,i.A)(n,b);const{class:a,interval:s,billingEmail:A}=t,l="year"===s?"yearly":"monthly",d="".concat(a," 
").concat(l),u=!["Community","EarlyBird"].includes(a),h=!!A&&!u;return(0,y.jsxs)(e,B(B({},o),{},{children:[u&&(0,y.jsxs)(y.Fragment,{children:[(0,y.jsxs)(r.TextBig,{children:["You are currently on ",(0,y.jsx)(r.TextBig,{strong:!0,children:d})," subscription, which will be cancelled automatically and any due credit from unused period will be given to your credit balance."]}),(0,y.jsxs)(r.TextBig,{children:["Any available credit with us won't be automatically lost. If you want to use it in the future, within the defined period on our"," ",(0,y.jsx)(c.A,{Component:r.TextBig,href:"https://www.netdata.cloud/service-terms/",rel:"noopener noreferrer",target:"_blank",children:"Terms of Service"}),", or have any requests about previous invoices you can reach out to"," ",(0,y.jsx)(r.TextBig,{strong:!0,children:"support@netdata.cloud"})]})]}),h&&(0,y.jsxs)(r.TextBig,{children:["You will lose direct access to you invoices and billing information. If you want to retrieve this information in the future, you'll have to contact"," ",(0,y.jsx)(r.TextBig,{strong:!0,children:"support@netdata.cloud"})]})]}))},M="initial",T="reasonConfirmed",I=[{label:"Not using advanced features"},{label:"Switching to a different solution"},{label:"Temporary project ending"},{label:"Lack of specific feature (please specify)",specify:!0},{label:"Other (please specify}",specify:!0}],v=e=>{let{reason:n,setReason:t,text:o,setText:i}=e;const s=I.map((e=>B(B({},e),{},{value:e.label}))),A=!!n.specify,c=(0,a.useCallback)((e=>{var n;i(null===e||void 0===e||null===(n=e.target)||void 0===n?void 0:n.value)}),[i]);return(0,y.jsxs)(r.Flex,{column:!0,gap:4,children:[(0,y.jsx)(r.TextBig,{children:"We're sorry to see you go. To help us improve, please tell us your main reason for deleting your space:"}),(0,y.jsx)(r.Select,{options:s,value:n,onChange:t,placeholder:"Please select a reason"}),A?(0,y.jsx)(r.TextInput,{value:o,onChange:c,placeholder:"Please specify"}):null]})},_=C((e=>{let{spaceName:n,confirmState:t,reason:o,setReason:i,text:a,setText:s,spaceInput:A,setSpaceInput:c,children:l}=e;return t?t===M?(0,y.jsx)(v,{reason:o,setReason:i,text:a,setText:s}):(0,y.jsxs)(r.Flex,{column:!0,gap:2,children:[(0,y.jsxs)(r.TextBig,{children:['To confirm, type "',(0,y.jsx)(r.TextBig,{strong:!0,children:n}),'" in the textbox below']}),(0,y.jsx)(r.TextInput,{value:A,autoFocus:!0,onChange:e=>c(e.target.value)})]}):(0,y.jsxs)(r.Flex,{column:!0,gap:1,padding:[0,0,4,0],children:[(0,y.jsxs)(r.TextBig,{children:["You are about to delete ",(0,y.jsx)("strong",{children:n})," space."]}),l,(0,y.jsx)(r.TextBig,{children:"Are you sure you want to continue?"})]})})),Q=e=>{let{id:n,isLastSpace:t,name:o,onClose:c}=e,b=(0,i.A)(e,E);const w=(0,s.Zp)(),C=(0,d.A)(n),I=(0,h.NJ)(),{value:v}=(0,g.JN)(),[Q,D]=(0,a.useState)(""),[x,k]=(0,a.useState)(""),[R,S]=(0,a.useState)(""),[P,,F,Y]=(0,m.A)(),[U,N]=(0,m.A)(),{sendLog:j,sendButtonClickedLog:z}=(0,p.A)(),[H,O]=(0,a.useState)(),L=(0,a.useMemo)((()=>U?"Deleting...":H===T?"Delete":"Proceed with Deletion"),[U,H]),G=(0,a.useMemo)((()=>U||H===M&&(!Q||Q.specify&&!x)||H===T&&R!==o),[U,H,Q,x,R]),J=(0,a.useCallback)((e=>{c(),j({feature:"DeleteSpace",isSuccess:!0}).finally((()=>w("/spaces/".concat(e))))}),[j,c]),q=(0,a.useCallback)((()=>{const e=f.ei?T:M;O(e)}),[O]),K=(0,a.useCallback)((()=>{O(T)}),[O]),V=(0,a.useCallback)((()=>{N(),C({onSuccess:J}),z({feature:"DeleteSpace",reason:null===Q||void 0===Q?void 
0:Q.label,details:x})}),[N,C,Q,x,z]),X=(0,a.useCallback)((()=>H?H===M?K():R===o?V():()=>{}:q()),[o,H,R,q,K,V]),W=(0,a.useCallback)((()=>{Y(),O(),D(""),k(""),S(""),j({feature:"DeleteSpace",isFailure:!0})}),[j,Y,O]),Z=(0,a.useCallback)((()=>{F(),z({feature:"DeleteSpace"})}),[F,z]);return I&&(0,y.jsxs)(A.A,{permission:"space:Delete",children:[(0,y.jsx)(l.A,{align:"top",content:t&&u.sh.delete,isBasic:!0,stretch:"align",children:(0,y.jsx)(r.Box,{children:(0,y.jsx)(r.Button,B({danger:!0,"data-ga":"manage-space-tab::click-delete-space::manage-space-modal","data-testid":"deleteSpace-button",disabled:t,flavour:"hollow",label:"Delete space",onClick:Z,isStart:!0},b))})}),P&&(0,y.jsx)(r.ConfirmationDialog,{confirmLabel:L,"data-ga":"delete-space","data-testid":"deleteSpaceDialog",handleConfirm:X,handleDecline:W,message:(0,y.jsx)(_,{spaceName:o,currentPlan:v,confirmState:H,reason:Q,setReason:D,text:x,setText:k,spaceInput:R,setSpaceInput:S}),title:"Delete ".concat(o," space"),isConfirmDisabled:G,isConfirmLoading:U,isDeclineDisabled:U})]})}},52353(e,n,t){"use strict";t.d(n,{e_:()=>A,fc:()=>s,ni:()=>r,xc:()=>a});t(27495);const o=/^[a-zA-Z0-9@_.-\s!]*$/,i=/^[a-z0-9]+(?:-[a-z0-9]+)*$/,a={nameMinLength:"Space name should be more than 4 characters",slugMinLength:"Space slug should be more than 2 characters",slugMaxLength:"Space slug should not be more than 30 characters",slugNotAvailable:"Space slug is not available. Please try another one.",nameAllowedChars:"Please use alphanumeric characters (A-Z, a-z, 0-9), spaces, periods and supported special characters @, -, _.",slugAllowedChars:"Please use lowercase alphanumeric characters (a-z, 0-9) or hyphens. The slug cannot start or end with a hyphen."},s=e=>e.length<5?"nameMinLength":o.test(e)?null:"nameAllowedChars",r=e=>e.length<3?"slugMinLength":i.test(e)?null:"slugAllowedChars",A=e=>!o.test(e)&&"allowedChars"},12470(e,n,t){"use strict";t.d(n,{A:()=>_});t(98992),t(54520),t(3949);var o=t(64467),i=t(80045),a=t(42358),s=t(2863),r=(t(72577),t(52035)),A=t(34843),c=t(18790),l=t(24609),d=t(57377);const u={alertCounter:{warning:0,critical:0},unreachableCount:0},h=(0,c.I)((e=>{let{id:n,key:t}=e;return(0,r.eU)((e=>{const o=e(l.EG),i=e((0,s.S)({id:o,key:"entries"})).find((e=>{let{id:t}=e;return t===n}))||u;return t?i[t]:i}))}),d.Ay);var m=t(19186),g=t(85720),p=t(45087),f=t(51510);const y=(0,f.default)(a.Flex).attrs({flex:{grow:0,shrink:0},width:2,height:2,margin:[0,1,0,0],round:1,background:"error",justifyContent:"center",alignItems:"center"}).withConfig({displayName:"indicators__ErrorIndicator",componentId:"sc-19hg3ay-0"})([""]),b=(0,f.default)(y).attrs({background:"warning"}).withConfig({displayName:"indicators__WarningIndicator",componentId:"sc-19hg3ay-1"})([""]),E=(0,f.default)(y).attrs({background:"textLite"}).withConfig({displayName:"indicators__UnreachableIndicator",componentId:"sc-19hg3ay-2"})([""]);var w=t(9509),B=t(93883),C=t(73467),M=t(74848);const T=["id","selectedId","spaceSlug","isSidebar","hideAlerts","differentiateIsMember","isAgent"];function I(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function v(e){for(var n=1;n<arguments.length;n++){var 
t=null!=arguments[n]?arguments[n]:{};n%2?I(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):I(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const _=e=>{let{id:n,selectedId:t,spaceSlug:o,isSidebar:r,hideAlerts:c,differentiateIsMember:l,isAgent:d}=e,u=(0,i.A)(e,T);const{alertCounter:{critical:f,warning:I},unreachableCount:_}=((e,n)=>(0,A.md)(h({id:e,key:n})))(n),Q=(0,m.wz)(n,"slug"),D=(0,m.wz)(n,"name"),x=(0,m.wz)(n,"isMember"),k=(0,s.z)("error"),R=(0,s.z)("updatedAt"),{state:S}=(0,w.D)(),{activeNavigationTab:P}=S||{},F=P?"/".concat(P):"";return(0,M.jsx)(g.A,v(v(v({},!!o&&{to:"/spaces/".concat(o,"/rooms/").concat(Q).concat(F)}),{},{testid:"roomLabel-warRoom-".concat(D),actions:c?null:(0,M.jsx)(p.A,{content:(0,M.jsx)(B.A,{error:k,text:"Room alerts",updatedAt:R}),isBasic:!0,align:"right",contentProps:{width:"220px"},children:(0,M.jsxs)(a.Flex,{flex:!1,flexWrap:!1,justifyContent:"end",width:{min:6},height:{min:2},children:[f>0&&(0,M.jsx)(y,{}),I>0&&(0,M.jsx)(b,{}),_>0&&(0,M.jsx)(E,{})]})}),icon:l&&x&&(0,M.jsx)(C.A,{}),iconColor:"successLite",iconHeight:"12px",iconWidth:"12px",gap:1,textProps:v(v({},l&&!x&&{padding:[0,0,0,4]}),{},{strong:d}),selected:n===t,isSidebar:r,isSecondary:!x},u),{},{children:D}))}},31933(e,n,t){"use strict";t.d(n,{A:()=>v});var o=t(80045),i=t(64467),a=(t(26910),t(98992),t(54520),t(3949),t(81454),t(8872),t(37550),t(62953),t(96540)),s=t(42358),r=t(24609),A=t(49916),c=t(19186),l=t(99728),d=t(12470),u=t(24155),h=t(6304),m=t(74891),g=t(74848);const p=["rooms","isSidebar","spaceSlug","currentRoomId","open"],f=["isSidebar","visibleRoomsCount"];function y(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function b(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?y(Object(t),!0).forEach((function(n){(0,i.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):y(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const E=(0,m.A)(s.Flex),w=(e,n)=>{const t=e.name.toLowerCase(),o=n.name.toLowerCase();return e.untouchable&&!n.untouchable?-1:!e.untouchable&&n.untouchable?1:e.isMember&&!n.isMember?-1:!e.isMember&&n.isMember?1:t<o?-1:t>o?1:0},B=(C=d.A,e=>{const n=(0,a.useMemo)((()=>e.isMember?{}:{textColor:"disabled",tooltip:"You\u2019re not a member of this room"}),[e.isMember]);return(0,g.jsx)(C,b(b({},e),n))});var C;const M=e=>{let{rooms:n=[],isSidebar:t,spaceSlug:i,currentRoomId:a,open:r}=e,A=(0,o.A)(e,p);const[c,l]=(0,h.A)(r);return n.length?(0,g.jsxs)(g.Fragment,{children:[c?n.map((e=>{let{id:n,isAgent:o,isMember:r}=e;return(0,g.jsx)(B,b({id:n,hideAlerts:!t,Wrapper:s.Text,isSidebar:t,spaceSlug:i,selectedId:a,isAgent:o,isMember:r},A),n)})):null,(0,g.jsxs)(E,{alignItems:"center",justifyContent:"center",gap:1,onClick:l,cursor:"pointer",border:{side:"top",color:"border"},margin:[1,0,0,0],padding:[1,0,0,0],tooltip:c?"":"View all rooms",tooltipProps:{align:"bottom"},children:[(0,g.jsx)(s.TextSmall,{color:"menuItem",children:c?"Show less":"Show 
more"}),(0,g.jsx)(s.Icon,{name:"chevron_down",size:"small",color:"menuItem",rotate:c?2:0})]})]}):null},T=12,I=()=>(0,g.jsx)(s.Flex,{column:!0,gap:2,padding:[2],children:Array.from(Array(10).keys()).map((e=>(0,g.jsx)(u.A,{height:"18px"},e)))}),v=e=>{let{isSidebar:n,visibleRoomsCount:t=T}=e,i=(0,o.A)(e,f);const d=(0,r.bq)(),u=(0,A.DL)(),{id:h=""}=(0,c.pr)(),m=(0,A.Y7)("loaded"),p=(0,c.ID)()||h,y=(0,l.JT)("room:ReadAll"),{top:E,rest:C}=(0,a.useMemo)((()=>(y?u:u.filter((e=>{let{isMember:n}=e;return n}))).sort(w).reduce(((e,n,o)=>b(b({},e),o<t?{top:[...e.top,n]}:{rest:[...e.rest,n]})),{top:[],rest:[]})),[y,u]),v=(0,a.useMemo)((()=>C.some((e=>{let{id:n}=e;return n===p}))),[C,p]);return!0===m&&p?(0,g.jsxs)(g.Fragment,{children:[E.map((e=>{let{id:t,isAgent:o,isMember:a}=e;return(0,g.jsx)(B,b({id:t,hideAlerts:!n,Wrapper:s.Text,isSidebar:n,spaceSlug:d,selectedId:p,isAgent:o,isMember:a},i),t)})),C.length?(0,g.jsx)(M,b({rooms:C,isSidebar:n,spaceSlug:d,currentRoomId:p,open:v},i)):null]}):(0,g.jsx)(I,{})}},64602(e,n,t){"use strict";t.d(n,{O:()=>s,v:()=>a});var o=t(51510),i=t(42358);const a=(0,o.default)(i.Icon).withConfig({displayName:"styled__StyledIcon",componentId:"sc-i0gfkp-0"})(["transform:",";"],(e=>{let{right:n}=e;return n?"rotate(0)":"rotate(180deg)"})),s=(0,o.default)(i.Button).withConfig({displayName:"styled__StyledButton",componentId:"sc-i0gfkp-1"})(["&&{padding:2px 16px;font-size:12px;height:auto;width:auto;min-width:96px;}"])},24655(e,n,t){"use strict";t.d(n,{A:()=>E});var o=t(64467),i=t(80045),a=(t(98992),t(54520),t(3949),t(62953),t(96540)),s=t(51510),r=t(41344),A=t(42358),c=t(45087),l=t(24609),d=t(7542),u=t(74848);const h=["active","showFullname"],m=["active","spaceId","testIdPrefix","local","onClick","showFullname","color"];function g(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function p(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?g(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):g(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const f=[],y=(0,s.default)(A.Flex).attrs((e=>{let{active:n,showFullname:t}=e;return p({width:t?"auto":7,height:7,background:n?"spaceSelected":"spaceIdle",justifyContent:"center",alignItems:"center",round:1,padding:t?[0,1]:[0],margin:t?[0,0,1]:[0]},(0,i.A)(e,h))})).withConfig({displayName:"spaceLabel__SpaceBox",componentId:"sc-1e67mnq-0"})(["background-color:",";cursor:pointer;&:hover{filter:brightness(140%);}"],(e=>{let{backgroundColor:n}=e;return n||""})),b=(0,s.default)(A.TextSmall).withConfig({displayName:"spaceLabel__StyledText",componentId:"sc-1e67mnq-1"})(["",""],(e=>{let{backgroundColor:n}=e;return n?"color: ".concat((0,d.bJ)(n),";"):""})),E=(w=e=>{let{active:n,spaceId:t,testIdPrefix:o,local:s=!1,onClick:c,showFullname:d,color:h}=e,g=(0,i.A)(e,m);const E=(0,r.Zp)(),w=(0,l.ns)(t),[B,C]=(e=>{if(!e)return f;const n=e.split(" 
"),[t,o]=n;return[t[0],o?o[0]:""]})(w.name),M=(0,a.useCallback)((()=>c?c(w):E(s?"/overview":"/spaces/".concat(w.slug))),[w.slug,s,c]);return(0,u.jsx)(y,p(p({active:n,"data-testid":"".concat(o||"spaceLabel-space","-").concat(w.slug),onClick:M},g),{},{showFullname:d,children:s?(0,u.jsx)(A.Icon,{name:"node",color:n?"textFocus":"textLite"}):d?(0,u.jsx)(A.Text,{strong:!0,color:n?"textFocus":h||"textNoFocus",children:w.name}):(0,u.jsxs)(u.Fragment,{children:[(0,u.jsx)(b,{strong:!0,color:n?"key":h||"text",backgroundColor:g.backgroundColor,children:B}),(0,u.jsx)(b,{strong:!0,color:n?"textLite":h||(g.backgroundColor?"text":"textNoFocus"),backgroundColor:g.backgroundColor,children:C})]})}))},e=>{const n=(0,l.ns)(e.spaceId),t=(0,a.useMemo)((()=>p({width:"100%",justifyContent:"center",padding:[2,0]},e.active?{background:"sideBar"}:{})),[e.active]);return e.showFullname?(0,u.jsx)(w,p({},e)):(0,u.jsx)(c.A,{content:null===n||void 0===n?void 0:n.name,align:"right",isBasic:!0,children:(0,u.jsx)(A.Flex,p(p({"data-testid":"spacebox-tooltip-container"},t),{},{children:(0,u.jsx)(w,p({},e))}))})});var w},34412(e,n,t){"use strict";t.d(n,{A:()=>f,D:()=>g});t(98992),t(54520),t(3949);var o=t(80045),i=t(64467),a=t(51510),s=t(42358),r=t(79748),A=t(74848);const c=["children"],l=["children","href"],d=["children"];function u(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function h(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?u(Object(t),!0).forEach((function(n){(0,i.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):u(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const m=a.default.article.withConfig({displayName:"markdocSchema__StyledArticle",componentId:"sc-c3znqb-0"})(["padding:0;"]),g=(0,a.default)(s.Text).attrs((e=>h({color:"main",fontSize:"14px"},e))).withConfig({displayName:"markdocSchema__StyledText",componentId:"sc-c3znqb-1"})([""]),p=(0,a.default)(r.A).attrs((e=>h({color:"main",fontSize:"14px"},e))).withConfig({displayName:"markdocSchema__StyledAnchor",componentId:"sc-c3znqb-2"})(["&:hover{color:"," !important;}"],(0,s.getColor)("main")),f={document:{render:m},paragraph:{render:e=>{let{children:n}=e,t=(0,o.A)(e,c);return(0,A.jsx)(s.Flex,h(h({padding:[1,0]},t),{},{children:(0,A.jsx)(g,{children:n})}))}},link:{render:e=>{let{children:n,href:t}=e,i=(0,o.A)(e,l);return(0,A.jsx)(p,h(h({href:t,rel:"noopener noreferrer",target:"_blank"},i),{},{children:n}))},attributes:{href:{type:String}}},strong:{render:e=>{let{children:n}=e,t=(0,o.A)(e,d);return(0,A.jsx)(g,h(h({strong:!0,fontSize:"14px"},t),{},{children:n}))}}}},40298(e,n,t){"use strict";t.d(n,{cW:()=>D,gg:()=>Q,Vt:()=>x});var o=t(64467),i=(t(98992),t(54520),t(3949),t(81454),t(25509),t(65223),t(60321),t(41927),t(11632),t(64377),t(66771),t(12516),t(68931),t(52514),t(35694),t(52774),t(49536),t(21926),t(94483),t(16215),t(62953),t(96540)),a=t(52035),s=t(34843),r=t(18790),A=t(30569);const c={id:"",email:""},l=(0,r.I)((()=>(0,a.eU)([]))),d=(0,r.I)((()=>(0,a.eU)(c)));var u=t(80045),h=t(91130);const m=["role"];function g(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}const 
p=e=>e.map((e=>{let{id:n,email:t}=e;return{id:n,email:t}})),f={member:1,admin:2,manager:3,troubleshooter:4,observer:5,billing:6},y=e=>e.map((e=>{let{role:n}=e,t=(0,u.A)(e,m);if(void 0===f[n])throw new Error("role not found");return function(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?g(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):g(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}({role:f[n]},t)}));t(8872);var b=t(16866);const E=e=>{let{data:n,invitations:t}=e;return n.reduce(((e,n,o)=>{var i,a;(a=n.errorMsgKey)&&a===b.vK&&(null!==(i=t[o])&&void 0!==i&&i.email&&e.push(t[o].email));return e}),[])},w=e=>e.length>1;var B=t(24609),C=t(57377);function M(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function T(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?M(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):M(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const I=(0,r.I)((e=>{let{id:n,key:t}=e;return(0,a.eU)((e=>{const o=e(d(n));return t?o[t]:o}))}),C.Ay),v=(0,r.I)((e=>{let{ids:n,key:t}=e;return(0,a.eU)((e=>(n||[]).map((n=>{const o=e(I({id:n}));return t?o[t]:o}))))}),C.Ay),_=(0,r.I)((e=>(0,a.eU)((n=>n(l(e))),((n,t,o)=>{let{invitations:i,merge:a}=o;t(l(e),(e=>[...new Set([...e,...i.map((e=>{let{id:n}=e;return n}))])])),i.forEach((e=>{t(d(e.id),(n=>T(T({},a&&n),e)))}))})))),Q=e=>{const n=(0,s.md)(_(e)),[t,o]=(0,i.useState)(!1),a=(0,A.yF)((0,i.useCallback)((async(n,t)=>{if(!(await n(_(e))).length){const{data:n}=await(e=>h.A.get("/api/v2/spaces/".concat(e,"/invitations"),{transform:p}))(e);t(_(e),{invitations:n,merge:!0})}}),[e])),r=(0,i.useCallback)((async(n,t,i)=>{let{onSuccess:a,onError:s}=i;try{o(!0);const{data:i}=await((e,n,t)=>h.A.post("/api/v1/spaces/".concat(e,"/invitations"),{redirectURI:t,requests:y(n)}))(e,n,t),r=E({data:i,invitations:n});if(r.length&&!w(n))return void(s&&s({message:"User has already joined that space!"}));if(r.length&&w(n)){if(r.length===n.length)return void(s&&s({message:"All of the selected users are already members of this space"}));if(a)return void a({header:"Invitations partially sent",text:"Some of the selected users are already members of this space"})}a&&a()}catch(r){s&&s(r)}finally{o(!1)}}),[e]),u=(0,A.yF)((0,i.useCallback)((async(n,t,o)=>{const i=await n(_(e)),a=i.filter((e=>e===o));t(l(e),(e=>{const n=new Set(e);return a.forEach((e=>n.delete(e))),[...n]}));try{await((e,n)=>h.A.delete("/api/v1/spaces/".concat(e,"/invitations"),{params:{invitation_ids:n.join(",")}}))(e,[o]),a.forEach((e=>{t(d(e),c)}))}catch(s){t(_(e),{invitations:i,merge:!1})}}),[e]));return(0,i.useEffect)((()=>{a()}),[e,a]),[n,u,r,t]},D=(e,n)=>(0,s.md)(I({id:e,key:n})),x=()=>(e=>{let{key:n}=e;const t=(0,B.vt)(),o=(0,s.md)(_(t));return(0,s.md)(v({ids:o,key:n}))})({key:"email"})},91370(e,n,t){"use strict";t.d(n,{A:()=>A});t(25509),t(65223),t(60321),t(41927),t(11632),t(64377),t(66771),t(12516),t(68931),t(52514),t(35694),t(52774),t(49536),t(21926),t(94483),t(16215),t(62953);var o=t(96540),i=t(30569),a=t(27965),s=t(24609),r=t(15624);const 
A=e=>{let{onSuccess:n,onError:t,isDefault:A=!1}=e;return(0,i.yF)((0,o.useCallback)((async(e,o,i)=>{let{name:c,userId:l,email:d}=i;try{const{data:e}=A?await(0,a.qi)(l,d):await(0,a.bz)(c),{id:t,slug:i,name:u}=e;o((0,r.Ay)(t),{id:t,slug:i,name:A?u:c,loaded:!0}),o((0,r.aR)(i),t),o((0,s.nC)("ids"),(e=>e=[...new Set([...e,t])])),o((0,s.nC)("loaded"),!0),n&&n(e)}catch(u){t&&t()}}),[n,t]))}},64999(e,n,t){"use strict";t.d(n,{A:()=>m});t(98992),t(54520),t(62953);var o=t(96540),i=t(84929),a=t(30569),s=t(63872),r=t(27965),A=t(99236),c=t(24609),l=t(15624),d=t(32874),u=t(86706),h=t(49916);const m=e=>{const[n,t]=(0,s.A)();return(0,a.yF)((0,o.useCallback)((async(o,a,s)=>{let{onSuccess:m,onError:g}=s;const{name:p}=await o((0,l.Ay)(e)),f=await o((0,c.nC)("ids")),y=f.filter((n=>e!==n)),b=e=>{a(e,i.Ut)},[E]=y;if(!E)return void t({header:"Spaces",text:A.sh.delete});const{slug:w}=E&&await o((0,l.Ay)(E));a((0,c.nC)("ids"),y),a(d.A,(n=>n.filter((n=>n!==e))));try{await(0,r.cN)(e),m&&m(w),(0,u.Z8)(b,e),(0,h.Is)(b,e),b((0,l.Ay)(e)),n({header:"Space deleted",text:'You have successfully deleted "'.concat(p,'" space')})}catch(B){a((0,c.nC)("ids"),f),t(B),g&&g()}}),[e,n,t]))}},3561(e,n,t){"use strict";t.d(n,{A:()=>d});var o=t(64467),i=(t(98992),t(54520),t(3949),t(62953),t(96540)),a=t(30569),s=t(24609),r=t(27965),A=t(63872);function c(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);n&&(o=o.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,o)}return t}function l(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?c(Object(t),!0).forEach((function(n){(0,o.A)(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):c(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}const d=e=>{const[n,t]=(0,A.A)();return(0,a.yF)((0,i.useCallback)((async(o,i,a,A)=>{let{onSuccess:c,onFail:d}=A;const u=await o((0,s.U2)({id:e}));i((0,s.U2)({id:e}),(e=>l(l({},e),a)));try{await(0,r.Yk)(e,a),n({header:"Space successfully updated!"}),c&&c()}catch(h){i((0,s.U2)({id:e}),u),t(h),d&&d()}}),[]))}},53949(e,n,t){"use strict";t.d(n,{A:()=>s});t(62953);var o=t(96540),i=t(3319);const a=()=>"dissmissed-banner",s=e=>{let{getLocalStorageKey:n=a,logKey:t}=e;const[s,r]=(0,o.useState)(!0),{sendButtonClickedLog:A}=(0,i.A)();(0,o.useEffect)((()=>{const e="true"===localStorage.getItem(n());r(e)}),[n]);return{dismissed:s,onClose:(0,o.useCallback)((()=>{localStorage.setItem(n(),!0),r(!0),t&&A({feature:t})}),[r,A,n])}}},5722(e,n,t){"use strict";t.d(n,{A:()=>A});var o=t(96540),i=t(24609),a=t(37617),s=t(14457),r=t(39175);const A=()=>{const e=(0,i.vt)(),n=(0,r.Az)(e),t=(0,i.ns)(e,"isEmpty"),{isOnboardingPath:A}=(0,a.A)(),{isIntegrationsPath:c}=(0,s.Q)(),l=(0,o.useMemo)((()=>!1===t&&!A&&!c),[t,A,c]);return!!n||l}},85660(e,n,t){"use strict";t.d(n,{A:()=>a});t(62953);var o=t(96540);const i={x:0,y:0,width:0,height:0,top:0,left:0,bottom:0,right:0},a=function(){let{callback:e}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const[n,t]=(0,o.useState)(null),[a,s]=(0,o.useState)(i),r=(0,o.useMemo)((()=>new window.ResizeObserver((n=>{if(n[0]){const{x:t,y:o,width:i,height:a,top:r,left:A,bottom:c,right:l}=n[0].contentRect;s({x:t,y:o,width:i,height:a,top:r,left:A,bottom:c,right:l}),"function"===typeof e&&e(n[0].contentRect)}}))),[e]);return(0,o.useLayoutEffect)((()=>{if(n)return r.observe(n),()=>{r.disconnect()}}),[n]),[t,a]}},63084(e,n,t){"use 
strict";t.d(n,{A:()=>c});var o=t(96540),i=t(49181),a=t(10602),s=t(24609),r=t(49916),A=t(72884);const c=()=>{const e=(0,A.t)(),n=(0,r.WW)(),t=(0,s.vt)(),c=(0,a.nl)(t,n);return(0,o.useCallback)((async function(o){let{onSuccess:a,onFail:s}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};await c(o,{onSuccess:o=>{(0,i.tv)({cacheKeyPrefix:e,nodeIds:o,roomIds:n,spaceId:t}),null===a||void 0===a||a()},onFail:s})}),[c])}},72679(e,n,t){"use strict";t.d(n,{A:()=>r});var o=t(41344),i=t(96540),a=t(24609),s=t(80372);const r=function(){let{roomSlug:e}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};const n=(0,a.bq)(),t=(0,o.Zp)(),{pathname:r}=(0,o.zy)(),A="".concat(r);return(0,i.useCallback)((()=>{const i=e?(0,o.tW)(s.uX,{spaceSlug:n,settingsTab:s.aj,settingsSubTab:e}):"/spaces/".concat(n,"/settings");t(i,{state:{previousUrlPath:A}})}),[A,t,e,n])}},4701(e,n,t){"use strict";t.d(n,{A:()=>r});var o=t(19186),i=t(10602),a=t(49181),s=t(72884);const r=e=>{const n=(0,s.t)(),t=(0,o.ID)(),r=(0,o.wz)(e||t,"spaceId");return(0,i.vV)(r,e||t,{onSuccess:o=>(0,a.gm)({cacheKeyPrefix:n,nodeIds:o,roomId:e||t,spaceId:r})})}},54391(e){function n(e){var n=new Error("Cannot find module '"+e+"'");throw n.code="MODULE_NOT_FOUND",n}n.keys=()=>[],n.resolve=n,n.id=54391,e.exports=n},50477(){}}]);